| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database. Caller is responsible for opening and closing the connection."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
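# A minimal usage sketch (assuming the `datasets` library is installed): `Dataset.to_sql`
# and `Dataset.from_sql` are the public entry points backed by SqlDatasetWriter and
# SqlDatasetReader above; the table name and database path are illustrative placeholders.
if __name__ == "__main__":
    import sqlite3

    from datasets import Dataset

    ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})

    con = sqlite3.connect("example.db")
    ds.to_sql("my_table", con)  # delegates to SqlDatasetWriter

    ds2 = Dataset.from_sql("SELECT id, text FROM my_table", "sqlite:///example.db")  # SqlDatasetReader
    print(ds2[0])  # {'id': 1, 'text': 'a'}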
| 244 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to another base (2 to 36) as str."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
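    # Worked examples: each digit follows from positional notation
    # (e.g. 255 = 15*16 + 15, and 15 maps to "F" via ALPHABET_VALUES).
    print(decimal_to_any(0, 2))     # '0'
    print(decimal_to_any(5, 4))     # '11'  (1*4 + 1)
    print(decimal_to_any(255, 16))  # 'FF'
    print(decimal_to_any(36, 36))   # '10'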
| 242 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure."""
    # Original Bort configuration
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"],
        units=predefined_args["units"], hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"], dropout=predefined_args["dropout"],
        output_attention=False, output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab),
        units=predefined_args["units"], embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"],
        use_pooler=False, use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False, use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
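# The conversion above repeats one core pattern: fetch a source-framework parameter by
# name, convert it to a torch tensor, and shape-check it before assigning. A
# self-contained sketch of that pattern (hypothetical demo names; a plain dict stands
# in for the Gluon parameter store):
def _demo_shape_checked_mapping():
    source_params = {"encoder.layer_norm.gamma": np.ones(768, dtype=np.float32)}

    def map_param(hf_param: torch.Tensor, source_name: str) -> nn.Parameter:
        # Look the source array up by name and verify shapes before assignment.
        value = nn.Parameter(torch.from_numpy(source_params[source_name]))
        assert hf_param.shape == value.shape, (
            f"{source_name}: got {tuple(value.shape)}, expected {tuple(hf_param.shape)}"
        )
        return value

    layer_norm = nn.LayerNorm(768)
    layer_norm.weight = map_param(layer_norm.weight, "encoder.layer_norm.gamma")
    return layer_norm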
| 364 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
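    # A worked example: a 4x4 matrix with a 2x2 window and stride 2 yields a 2x2 output.
    demo = np.array(
        [
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
        ]
    )
    print(maxpooling(demo, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
    print(avgpooling(demo, size=2, stride=2))  # [[ 3.  5.] [11. 13.]] (int-truncated averages)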
| 314 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every vector in value_array, finds the nearest vector in dataset and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
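    # Example run: for each query vector, similarity_search returns the closest
    # dataset vector and its Euclidean distance.
    dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
    value_array = np.array([[0, 0, 1]])
    print(similarity_search(dataset, value_array))  # [[[0, 0, 0], 1.0]]
    print(cosine_similarity(np.array([1, 2]), np.array([2, 4])))  # 1.0 (parallel vectors)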
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
                 vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
                 intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_xsum(self):
        self._assert_generated_batch_equal_expected()
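# A minimal inference sketch mirroring translate_src_text above (assumes TensorFlow is
# installed and the google/pegasus-xsum weights can be downloaded; exact output text
# may vary across library versions).
if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    pegasus = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")

    inputs = tok(["PG&E stated it scheduled the blackouts in response to forecasts for high winds."],
                 padding=True, return_tensors="tf")
    summary_ids = pegasus.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
    print(tok.batch_decode(summary_ids.numpy(), skip_special_tokens=True))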
| 349 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
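# The boilerplate above defers heavy imports until an attribute is first accessed. A
# standalone sketch of the same idea using only the standard library (hypothetical
# demo/module names; the real `_LazyModule` implements more machinery):
if __name__ == "__main__":
    import importlib
    import types

    class LazyDemoModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # attribute name -> submodule that defines it
            self._attr_to_module = {
                attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
            }

        def __getattr__(self, attr):
            # Runs only when normal lookup fails, i.e. on first access of a lazy attribute.
            submodule = self._attr_to_module.get(attr)
            if submodule is None:
                raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
            return getattr(importlib.import_module(submodule), attr)

    lazy = LazyDemoModule("demo", {"json": ["dumps"]})
    print(lazy.dumps({"a": 1}))  # the json module is imported only at this point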
| 365 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
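# A short usage sketch of the save/load round trip the tests above exercise;
# keyword arguments passed to from_pretrained override the stored values.
if __name__ == "__main__":
    demo_config = GenerationConfig(do_sample=True, temperature=0.7, num_beams=1)
    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_config.save_pretrained(tmp_dir)
        loaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
    print(loaded.temperature)  # 1.0 (overridden at load time)
    print(loaded.do_sample)    # True (restored from disk)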
| 29 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48,
                 n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6,
                 initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
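# Instantiation sketch (assuming transformers is installed): the defaults reproduce the
# original CTRL architecture; keyword arguments override individual hyperparameters.
if __name__ == "__main__":
    config = CTRLConfig()          # 48 layers, n_embd=1280 by default
    small = CTRLConfig(n_layer=2)  # a reduced variant, e.g. for quick tests
    print(config.n_layer, small.n_layer)  # 48 2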
| 87 |
"""simple docstring"""
def longest_common_subsequence(x: str, y: str):
    """Finds the longest common subsequence between two strings, returning its length and the subsequence."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)

    import doctest

    doctest.testmod()
| 135 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None,
                 do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 360 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
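# Usage sketch for the registry above (as exposed in `datasets.formatting`): a
# formatter can be resolved by its canonical name or any registered alias.
if __name__ == "__main__":
    formatter = get_formatter("np")  # "np" is an alias of "numpy" -> NumpyFormatter
    same = get_formatter("numpy")
    print(type(formatter).__name__, type(same).__name__)  # NumpyFormatter NumpyFormatter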
| 70 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, position_embedding_type="absolute", use_cache=True,
                 classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2,
                 adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True,
                 languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 203 |
"""simple docstring"""
def selection_sort(collection: list) -> list:
    """Pure implementation of the selection sort algorithm in Python."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
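    # Worked examples (selection sort is in-place, O(n^2) comparisons, at most
    # n - 1 swaps):
    print(selection_sort([64, 25, 12, 22, 11]))  # [11, 12, 22, 25, 64]
    print(selection_sort([]))                    # []
    print(selection_sort([3, 3, 1]))             # [1, 3, 3]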
| 203 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
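# Hedged usage note (added): with the lazy structure above, importing the package
# stays cheap and the torch-backed class is only resolved on first attribute access:
#
#   from transformers.models.timm_backbone import TimmBackboneConfig   # no torch import yet
#   config = TimmBackboneConfig(backbone="resnet50")                   # kwargs assumed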
| 250 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
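# Hedged illustration (added, not in the original file): the defaults above mirror
# the ViT-Base hyper-parameters.
_demo_config = ViTMSNConfig()
assert _demo_config.hidden_size == 768 and _demo_config.num_hidden_layers == 12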
| 250 | 1 |
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
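# Hedged illustration (added, not in the original solution): on this hand-made
# 4x4 grid, the best run of four is the fourth column, 9 * 9 * 9 * 9 = 6561.
example_grid = [
    [1, 1, 1, 9],
    [1, 2, 1, 9],
    [1, 1, 3, 9],
    [1, 1, 1, 9],
]
assert largest_product(example_grid) == 6561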
| 20 |
'''simple docstring'''

import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
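# Hedged usage sketch (added; the test file path is illustrative): given a model
# test file, list its test classes and the model -> tester mapping.
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(to_json(get_test_classes(test_file)))
#   print(to_json(get_model_to_tester_mapping(test_file)))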
| 35 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
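# Hedged inline check (added, not in the original scheduler file): with the default
# cosine transform, betas_for_alpha_bar yields betas in (0, 0.999] whose cumulative
# product of (1 - beta) decays smoothly from ~1 toward 0.
_example_betas = betas_for_alpha_bar(1000)
_example_alphas_cumprod = torch.cumprod(1.0 - _example_betas, dim=0)
assert _example_betas.min() > 0 and _example_betas.max() <= 0.999
assert _example_alphas_cumprod[0] > 0.99 and _example_alphas_cumprod[-1] < 0.01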
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
'''simple docstring'''
if trained_betas is not None:
lowercase__ = torch.tensor(_lowercase , dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase__ = torch.linspace(_lowercase , _lowercase , _lowercase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase__ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowercase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase__ = betas_for_alpha_bar(_lowercase )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowercase__ = 1.0 - self.betas
lowercase__ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_lowercase , _lowercase , _lowercase )
    def index_for_timestep(self, timestep, schedule_timesteps=None):
'''simple docstring'''
if schedule_timesteps is None:
lowercase__ = self.timesteps
lowercase__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowercase__ = 1 if len(_lowercase ) > 1 else 0
else:
lowercase__ = timestep.cpu().item() if torch.is_tensor(_lowercase ) else timestep
lowercase__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
    def init_noise_sigma(self):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
'''simple docstring'''
lowercase__ = num_inference_steps
lowercase__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowercase__ = np.linspace(0 , num_train_timesteps - 1 , _lowercase , dtype=_lowercase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowercase__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase__ = (np.arange(0 , _lowercase ) * step_ratio).round()[::-1].copy().astype(_lowercase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowercase__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase__ = (np.arange(_lowercase , 0 , -step_ratio )).round().copy().astype(_lowercase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowercase__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowercase__ = torch.from_numpy(np.log(_lowercase ) ).to(_lowercase )
lowercase__ = np.interp(_lowercase , np.arange(0 , len(_lowercase ) ) , _lowercase )
lowercase__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowercase__ = torch.from_numpy(_lowercase ).to(device=_lowercase )
# interpolate sigmas
lowercase__ = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
lowercase__ = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
lowercase__ = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_lowercase ).startswith("mps" ):
# mps does not support float64
lowercase__ = torch.from_numpy(_lowercase ).to(_lowercase , dtype=torch.floataa )
else:
lowercase__ = torch.from_numpy(_lowercase ).to(_lowercase )
# interpolate timesteps
lowercase__ = self.sigma_to_t(_lowercase ).to(_lowercase , dtype=timesteps.dtype )
lowercase__ = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
lowercase__ = torch.cat([timesteps[:1], interleaved_timesteps] )
lowercase__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowercase__ = defaultdict(_lowercase )
    def sigma_to_t(self, sigma):
'''simple docstring'''
lowercase__ = sigma.log()
# get distribution
lowercase__ = log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowercase__ = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
lowercase__ = low_idx + 1
lowercase__ = self.log_sigmas[low_idx]
lowercase__ = self.log_sigmas[high_idx]
# interpolate sigmas
lowercase__ = (low - log_sigma) / (low - high)
lowercase__ = w.clamp(0 , 1 )
# transform interpolation to time range
lowercase__ = (1 - w) * low_idx + w * high_idx
lowercase__ = t.view(sigma.shape )
return t
@property
    def state_in_first_order(self):
'''simple docstring'''
return self.sample is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True):
'''simple docstring'''
lowercase__ = self.index_for_timestep(_lowercase )
# advance index counter by 1
lowercase__ = timestep.cpu().item() if torch.is_tensor(_lowercase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowercase__ = self.sigmas[step_index]
lowercase__ = self.sigmas_interpol[step_index + 1]
lowercase__ = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowercase__ = self.sigmas[step_index - 1]
lowercase__ = self.sigmas_interpol[step_index]
lowercase__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowercase__ = 0
lowercase__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowercase__ = sigma_hat if self.state_in_first_order else sigma_interpol
lowercase__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = sigma_hat if self.state_in_first_order else sigma_interpol
lowercase__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowercase__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowercase__ = sigma_interpol - sigma_hat
# store for 2nd order step
lowercase__ = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowercase__ = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowercase__ = sigma_next - sigma_hat
lowercase__ = self.sample
lowercase__ = None
lowercase__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowercase )
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
'''simple docstring'''
lowercase__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowercase ):
# mps does not support float64
lowercase__ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowercase__ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowercase__ = self.timesteps.to(original_samples.device )
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = [self.index_for_timestep(_lowercase , _lowercase ) for t in timesteps]
lowercase__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowercase__ = sigma.unsqueeze(-1 )
lowercase__ = original_samples + noise * sigma
return noisy_samples
def __len__( self :Any ):
'''simple docstring'''
return self.config.num_train_timesteps
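# Hedged usage sketch (added; `unet` is a hypothetical denoising model, and the
# exact call pattern is an assumption based on the methods defined above):
#
#   scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample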
| 201 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
set_seed(3 )
# generate train_data and objective_set
lowercase__ , lowercase__ = generate_datasets(
__magic_name__ , __magic_name__ , number=__magic_name__ , min_len=1026 , trim=__magic_name__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
lowercase__ = load_gpta("gpt2" ).to(__magic_name__ )
print("computing perplexity on objective set" )
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ ).item()
print("perplexity on objective set:" , __magic_name__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
set_seed(42 )
# Load pre-trained model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
lowercase__ = SecondaryLearner(__magic_name__ )
# Train secondary learner
lowercase__ = train_secondary_learner(
__magic_name__ , __magic_name__ , max_epochs=__magic_name__ , batch_size=__magic_name__ , eval_freq=100 , igf_model_path=__magic_name__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpta,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
lowercase__ = RandomSampler(__magic_name__ )
lowercase__ = DataLoader(__magic_name__ , sampler=__magic_name__ )
lowercase__ = max_steps // (len(__magic_name__ )) + 1
lowercase__ = 0
lowercase__ = torch.zeros((1, context_len) , dtype=torch.long , device=__magic_name__ )
lowercase__ , lowercase__ , lowercase__ = recopy_model(__magic_name__ , __magic_name__ , __magic_name__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(__magic_name__ )
secondary_learner.eval()
lowercase__ = []
lowercase__ = 0
lowercase__ = []
lowercase__ = []
# Compute the performance of the transformer model at the beginning
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
for epoch in range(int(__magic_name__ ) ):
for step, example in enumerate(__magic_name__ ):
torch.cuda.empty_cache()
lowercase__ = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase__ = model(__magic_name__ , labels=__magic_name__ )
lowercase__ = True
if secondary_learner is not None:
lowercase__ = secondary_learner.forward(
torch.tensor(__magic_name__ , dtype=torch.long , device=__magic_name__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__magic_name__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowercase__ = -1
if predicted_q < threshold:
lowercase__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __magic_name__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
lowercase__ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=__magic_name__ , default=__magic_name__ , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=__magic_name__ , default=__magic_name__ , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=__magic_name__ , type=__magic_name__ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=__magic_name__ , default=__magic_name__ , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=__magic_name__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=__magic_name__ , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=__magic_name__ , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=__magic_name__ , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=__magic_name__ , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=__magic_name__ , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=__magic_name__ , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=__magic_name__ , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=__magic_name__ , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=__magic_name__ , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=__magic_name__ , type=__magic_name__ , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=__magic_name__ , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__magic_name__ , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=__magic_name__ , type=__magic_name__ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__magic_name__ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
lowercase__ = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
lowercase__ = training_secondary_learner(
__magic_name__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase__ , lowercase__ = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=__magic_name__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__magic_name__ , __magic_name__ , __magic_name__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__magic_name__ , secondary_learner=__magic_name__ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
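# Hedged CLI sketch (added; the script name is illustrative, the flags mirror the
# argparse definitions above):
#
#   python run_igf.py \
#       --data_file data/tokenized_stories_train_wikitext103.jbl \
#       --igf_data_file igf_context_pairs.jbl \
#       --max_steps 1000 --batch_size 16 --threshold 1.0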
| 201 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__a :List[str] = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowerCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowerCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowerCamelCase : Optional[str] = field(
default=snake_case_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
_lowerCamelCase : str = field(metadata={'help': 'Should contain the data files for the task.'} )
_lowerCamelCase : int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_lowerCamelCase : bool = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main():
"""simple docstring"""
A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A_ , A_ , A_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" ,__UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
A_ = processors[data_args.task_name]()
A_ = processor.get_labels()
A_ = len(__UpperCamelCase )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=__UpperCamelCase ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
A_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
A_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,)
# Get datasets
A_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=__UpperCamelCase ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
A_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=__UpperCamelCase ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
A_ = DataCollatorWithPadding(__UpperCamelCase ,pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
A_ = Trainer(
model=__UpperCamelCase ,args=__UpperCamelCase ,train_dataset=__UpperCamelCase ,eval_dataset=__UpperCamelCase ,compute_metrics=__UpperCamelCase ,data_collator=__UpperCamelCase ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A_ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A_ = trainer.evaluate()
A_ = os.path.join(training_args.output_dir ,"eval_results.txt" )
if trainer.is_world_master():
with open(__UpperCamelCase ,"w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" ,__UpperCamelCase ,__UpperCamelCase )
writer.write("%s = %s\n" % (key, value) )
results.update(__UpperCamelCase )
return results
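# Hedged CLI sketch (added; the script name and data paths are illustrative, the
# flags come from the ModelArguments/DataTrainingArguments/TrainingArguments above):
#
#   python run_multiple_choice.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name swag --data_dir ./swag --output_dir ./out \
#       --do_train --do_eval --max_seq_length 128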
def _mp_fn(index):
"""simple docstring"""
main()
if __name__ == "__main__":
    main()
| 312 |
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
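# Hedged sanity check (added; the upstream Project Euler problem reference is not
# shown in this dump): a few small values computed by hand.
assert solution(3) == 6    # 2 * 3 * 1
assert solution(5) == 34   # 6 + 2 * 4 * 1 + 2 * 5 * 2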
if __name__ == "__main__":
    print(solution())
| 312 | 1 |
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
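# Hedged usage check (added; mirrors the upstream doctest of this algorithm): a
# shift of 10 minimizes the chi-squared statistic for this ciphertext.
shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
assert (shift, decoded) == (10, "short string")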
| 140 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : str = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 140 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 306 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 76 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
__SCREAMING_SNAKE_CASE = (
"Wrong input data's dimensions... "
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(lowerCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
__SCREAMING_SNAKE_CASE = (
"Wrong input data's shape... "
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(lowerCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
__SCREAMING_SNAKE_CASE = (
"Input data have different datatype... "
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = []
for value in value_array:
__SCREAMING_SNAKE_CASE = euclidean(lowerCAmelCase_ , dataset[0] )
__SCREAMING_SNAKE_CASE = dataset[0].tolist()
for dataset_value in dataset[1:]:
__SCREAMING_SNAKE_CASE = euclidean(lowerCAmelCase_ , lowerCAmelCase_ )
if dist > temp_dist:
__SCREAMING_SNAKE_CASE = temp_dist
__SCREAMING_SNAKE_CASE = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return np.dot(lowerCAmelCase_ , lowerCAmelCase_ ) / (norm(lowerCAmelCase_ ) * norm(lowerCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
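# Hedged usage check (added; mirrors the upstream doctest): the first dataset row
# is the nearest neighbour of the query vector, at euclidean distance 1.0.
dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
value_array = np.array([[0, 0, 1]])
assert similarity_search(dataset, value_array) == [[[0, 0, 0], 1.0]]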
| 369 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [0 for i in range(r + 1 )]
# nc0 = 1
__SCREAMING_SNAKE_CASE = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ , lowerCAmelCase_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
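# Hedged check (added): the DP above fills Pascal's row in place; C(10, 5) = 252
# agrees with the standard library.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252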
| 195 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 201 |
import random


def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
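# Hedged usage sketch (added): selecting order statistics without fully sorting;
# index len(items) // 2 yields a median element for odd-length lists.
items = [7, 1, 5, 3, 9]
assert quick_select(items, 0) == 1      # minimum
assert quick_select(items, 2) == 5      # median of 5 elements
assert quick_select(items, 4) == 9      # maximum
assert quick_select(items, 5) is None   # out-of-range index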
| 201 | 1 |
"""simple docstring"""
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''microsoft/speecht5_tts'''
lowerCamelCase = (
'''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
'''text to read (in English) and returns a waveform object containing the sound.'''
)
lowerCamelCase = '''text_reader'''
lowerCamelCase = SpeechTaProcessor
lowerCamelCase = SpeechTaForTextToSpeech
lowerCamelCase = SpeechTaHifiGan
lowerCamelCase = ['''text''']
lowerCamelCase = ['''audio''']
def _lowerCAmelCase ( self ) -> List[Any]:
if self.post_processor is None:
_lowerCAmelCase ="""microsoft/speecht5_hifigan"""
super().setup()
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> Tuple:
_lowerCAmelCase =self.pre_processor(text=__UpperCAmelCase , return_tensors="""pt""" , truncation=__UpperCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase =load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase =torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
with torch.no_grad():
return self.model.generate_speech(**__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
with torch.no_grad():
return self.post_processor(__UpperCAmelCase ).cpu().detach()
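# Hedged usage sketch (added; assumes the transformers tools runtime wires
# encode -> forward -> decode when the tool is called):
#
#   tool = TextToSpeechTool()
#   tool.setup()
#   waveform = tool("Hello, how are you today?")
#   # `waveform` is a tensor of audio samples that can be written to a WAV file.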
| 359 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
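# Hedged usage sketch (added; mirrors the example in the metric docstring above):
#
#   bleu = datasets.load_metric("bleu")
#   results = bleu.compute(
#       predictions=[["hello", "there", "general", "kenobi"]],
#       references=[[["hello", "there", "general", "kenobi"]]],
#   )
#   results["bleu"]   # 1.0 for an exact match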
| 341 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case ( _SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = TextToVideoSDPipeline
lowercase = TEXT_TO_IMAGE_PARAMS
lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowercase = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
])
def __lowercase ( self : List[str] ) -> str:
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
lowerCAmelCase_ : str = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowerCAmelCase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
lowerCAmelCase_ : Any = CLIPTextModel(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowercase ( self : int , lowerCamelCase : Dict , lowerCamelCase : int=0 ) -> Union[str, Any]:
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
lowerCAmelCase_ : Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase_ : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __lowercase ( self : Optional[int] ) -> Tuple:
lowerCAmelCase_ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
lowerCAmelCase_ : str = TextToVideoSDPipeline(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : List[str] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : int = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
lowerCAmelCase_ : int = 'np'
lowerCAmelCase_ : Optional[int] = sd_pipe(**_SCREAMING_SNAKE_CASE ).frames
lowerCAmelCase_ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowerCAmelCase_ : List[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : List[Any] ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowercase ( self : Union[str, Any] ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __lowercase ( self : Dict ) -> str:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __lowercase ( self : int ) -> List[Any]:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __lowercase ( self : List[str] ) -> Any:
pass
def __lowercase ( self : Dict ) -> Dict:
return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : List[str] ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
lowerCAmelCase_ : Union[str, Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
lowerCAmelCase_ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase_ : Any = pipe.to("""cuda""" )
lowerCAmelCase_ : Tuple = 'Spiderman is surfing'
lowerCAmelCase_ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="""pt""" ).frames
lowerCAmelCase_ : List[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def __lowercase ( self : Any ) -> Any:
lowerCAmelCase_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
lowerCAmelCase_ : Union[str, Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
lowerCAmelCase_ : Any = pipe.to("""cuda""" )
lowerCAmelCase_ : Any = 'Spiderman is surfing'
lowerCAmelCase_ : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ : int = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""pt""" ).frames
lowerCAmelCase_ : Tuple = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 120 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']
def create_inputs(input_types ):
    """simple docstring"""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3_0_0_0 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f"Invalid type requested: {input_type}" )
    return inputs
def output_types(outputs ):
    """simple docstring"""
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append('text' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append('image' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append('audio' )
        else:
            raise ValueError(f"Invalid output: {output}" )
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self ):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , 'inputs' ) )
        self.assertTrue(hasattr(self.tool , 'outputs' ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call(self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
    def test_common_attributes(self ):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , 'description' ) )
        self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
        self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
    def test_agent_type_output(self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_types_inputs(self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 253 | 0 |
from math import ceil
def assert_device_map( device_map , num_blocks ):
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
            ''' These attention blocks were specified more than once: ''' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            '''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
            '''blocks to a device on the device_map: ''' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            '''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
            + str(extra_blocks ) )
def get_device_map( n_layers , devices ):
    """Evenly split the `n_layers` layer indices across `devices`."""
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
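if __name__ == "__main__":
    # A worked example of the even split computed above (illustrative only):
    # 12 layers over 3 devices -> ceil(12 / 3) = 4 consecutive layers per device.
    assert get_device_map(12 , [0, 1, 2] ) == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}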
| 65 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"
    def __init__(self , image_processor )->Union[str, Any]:
        '''simple docstring'''
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['''longest_edge''']
    def __call__(self , images=None , input_points=None , input_labels=None , input_boxes=None , return_tensors = None , **kwargs , )->BatchEncoding:
        '''simple docstring'''
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , **kwargs , )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor['''original_sizes''']
        if hasattr(original_sizes , '''numpy''' ):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points , input_labels , input_boxes = self._check_and_preprocess_points(
            input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
        return encoding_image_processor
    def _normalize_and_convert(self , encoding_image_processor , original_sizes , input_points=None , input_labels=None , input_boxes=None , return_tensors="pt" , )->Dict:
        '''simple docstring'''
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_size )
                    for point, original_size in zip(input_points , original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points , input_labels = self._pad_points_and_labels(input_points , input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_sizes[0] , is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_size , is_bounding_box=True )
                    for box, original_size in zip(input_boxes , original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({'''input_boxes''': input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({'''input_points''': input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({'''input_labels''': input_labels} )
        return encoding_image_processor
    def _pad_points_and_labels(self , input_points , input_labels )->Dict:
        '''simple docstring'''
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self , target_size , coords , original_size , is_bounding_box=False )->np.ndarray:
        '''simple docstring'''
        old_h , old_w = original_size
        new_h , new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1 , 2 , 2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 , 4 )
        return coords
    def _check_and_preprocess_points(self , input_points=None , input_labels=None , input_boxes=None , ):
        '''simple docstring'''
        if input_points is not None:
            if hasattr(input_points , '''numpy''' ):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
                raise ValueError('''Input points must be a list of list of floating points.''' )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels , '''numpy''' ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
                raise ValueError('''Input labels must be a list of list integers.''' )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes , '''numpy''' ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes , list )
                or not isinstance(input_boxes[0] , list )
                or not isinstance(input_boxes[0][0] , list )
            ):
                raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self )->List[str]:
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks(self , *args , **kwargs )->Union[str, Any]:
        '''simple docstring'''
        return self.image_processor.post_process_masks(*args , **kwargs )
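# Illustrative-only sketch of the arithmetic inside `_normalize_coordinates`, stripped of
# the processor plumbing (`_demo_rescale` is a hypothetical helper, not part of the class):
# a point on a 480x640 (H, W) image, resized so the longest edge is 1024, lands on the
# resulting 768x1024 image at (x * 1024/640, y * 768/480).
def _demo_rescale(coords , original_size , new_size ):
    old_h , old_w = original_size
    new_h , new_w = new_size
    coords = coords.astype(float )
    coords[..., 0] = coords[..., 0] * (new_w / old_w)
    coords[..., 1] = coords[..., 1] * (new_h / old_h)
    return coords


assert np.allclose(_demo_rescale(np.array([[320.0, 240.0]] ) , (480, 640) , (768, 1024) ) , [[512.0, 384.0]] )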
| 65 | 1 |
'''simple docstring'''
def euclidean_gcd(a: int , b: int ) -> int:
    """simple docstring"""
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive(a: int , b: int ) -> int:
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main() -> None:
"""simple docstring"""
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 53 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] =logging.get_logger(__name__)
a__ : List[Any] ={
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "altclip_text_model"
    def __init__(self , vocab_size=2_5_0_0_0_2 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_4 , type_vocab_size=1 , initializer_range=0.02 , initializer_factor=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=7_6_8 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "altclip_vision_model"
    def __init__(self , hidden_size=7_6_8 , intermediate_size=3_0_7_2 , projection_dim=5_1_2 , num_hidden_layers=1_2 , num_attention_heads=1_2 , num_channels=3 , image_size=2_2_4 , patch_size=3_2 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('model_type' ) == "altclip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "altclip"
    is_composition = True
    def __init__(self , text_config=None , vision_config=None , projection_dim=7_6_8 , logit_scale_init_value=2.6592 , **kwargs ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop('text_config_dict' , None )
        vision_config_dict = kwargs.pop('vision_config_dict' , None )
        super().__init__(**kwargs )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key ): value for key, value in _vision_config_dict['id2label'].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.' )
        self.text_config = AltCLIPTextConfig(**text_config )
        self.vision_config = AltCLIPVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
@classmethod
    def from_text_vision_configs(cls , text_config: AltCLIPTextConfig , vision_config: AltCLIPVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 53 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_ckpt''' , type=str , default='''microsoft/unixcoder-base-nine''' )
    parser.add_argument('''--num_epochs''' , type=int , default=5 )
    parser.add_argument('''--batch_size''' , type=int , default=6 )
    parser.add_argument('''--gradient_accumulation_steps''' , type=int , default=1 )
    parser.add_argument('''--freeze''' , type=bool , default=True )
    parser.add_argument('''--learning_rate''' , type=float , default=5E-4 )
    parser.add_argument('''--seed''' , type=int , default=0 )
    parser.add_argument('''--lr_scheduler_type''' , type=str , default='''cosine''' )
    parser.add_argument('''--num_warmup_steps''' , type=int , default=10 )
    parser.add_argument('''--weight_decay''' , type=float , default=0.01 )
    parser.add_argument('''--output_dir''' , type=str , default='''./results''' )
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred ):
    '''simple docstring'''
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback(TrainerCallback ):
    '''simple docstring'''
    def __init__(self , trainer ) -> None:
        """simple docstring"""
        super().__init__()
        self._trainer = trainer
    def on_epoch_end(self , args , state , control , **kwargs ):
        """simple docstring"""
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
            return control_copy
def main():
    '''simple docstring'''
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['''test'''].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            '''train''': train_test['''train'''],
            '''test''': test_validation['''train'''],
            '''valid''': test_validation['''test'''],
        } )
    print('''Loading tokenizer and model''' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example['''src'''] , truncation=True , max_length=1024 )
        label = labels.str2int(example['''complexity'''] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['''train'''].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print('''Training...''' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
| 152 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["BeitFeatureExtractor"]
__magic_name__ = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
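# A minimal sketch of the lazy-import pattern used above. Assumption: this toy
# `_TinyLazyModule` is a simplification for illustration, not transformers' real
# `_LazyModule`; the idea is the same -- nothing listed in `_import_structure` is
# imported until the attribute is first accessed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Find which submodule exports `attr`, import it on demand, and cache the result.
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(f".{submodule}", self.__name__), attr)
                setattr(self, attr, value)
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")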
| 152 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase: Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase: Dict = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_UpperCamelCase: Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 255 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
| 313 | 0 |
def counting_sort(collection ):
    """simple docstring"""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i the collection has
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string ):
    """simple docstring"""
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 308 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
_lowerCAmelCase : Any = "pytorch_model.bin"
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The name of the task to train on.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class __magic_name__ :
SCREAMING_SNAKE_CASE = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
SCREAMING_SNAKE_CASE = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
SCREAMING_SNAKE_CASE = dataclasses.field(
default=lowerCAmelCase_ , metadata={'help': 'Random seed for initialization.'} , )
def create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , idalabel , next_data_dir ):
    """simple docstring"""
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort('probability' , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )
    dataset = dataset.remove_columns(['label', 'probability'] )
    dataset = dataset.rename_column('prediction' , 'label' )
    dataset = dataset.map(lambda example: {"label": idalabel[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , F'train_pseudo.{args.data_file_extension}' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
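# Illustrative-only sketch of the confidence filter above on a toy in-memory dataset.
# `Dataset.from_dict` and `.filter` are standard `datasets` APIs; `_toy_confidence_filter`
# is a hypothetical helper, not part of this script.
def _toy_confidence_filter():
    toy = datasets.Dataset.from_dict({"prediction": [1, 0], "probability": [0.95, 0.40]} )
    return toy.filter(lambda example: example["probability"] > 0.9 )  # keeps only the first row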
def UpperCamelCase_( _snake_case : List[Any] , _snake_case : str , _snake_case : int , _snake_case : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
__a =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
__a =STModelArguments(model_name_or_path=_snake_case )
__a =STDataArguments(train_file=_snake_case , infer_file=_snake_case )
__a =STTrainingArguments(output_dir=_snake_case )
__a =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_snake_case ).items():
setattr(_snake_case , _snake_case , _snake_case )
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case ):
setattr(_snake_case , _snake_case , _snake_case )
# Sanity checks
__a ={}
__a =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
__a =args.train_file
__a =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
__a =args.eval_file
for key in data_files:
__a =data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
__a =extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
__a =F'{args.output_dir}/self-train_iter-{{}}'.format
__a =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_snake_case )
os.makedirs(_snake_case , exist_ok=_snake_case )
accelerator.wait_for_everyone()
__a =None
__a =None
__a =0
__a =False
# Show the progress bar
__a =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
__a =data_dir_format(_snake_case )
assert os.path.exists(_snake_case )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
__a =os.path.join(_snake_case , 'stage-1' )
__a ={
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_snake_case , _snake_case ):
arguments_dict.update({key: value} )
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , _snake_case )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
__a =os.path.join(_snake_case , 'best-checkpoint' )
__a =os.path.join(_snake_case , 'stage-2' )
# Update arguments_dict
__a =model_path
__a =data_files['train']
__a =current_output_dir
__a =os.path.join(_snake_case , 'best-checkpoint' , _snake_case )
if os.path.exists(_snake_case ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _snake_case , _snake_case , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _snake_case )
finetune(**_snake_case )
accelerator.wait_for_everyone()
assert os.path.exists(_snake_case )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , _snake_case )
__a =iteration
__a =data_dir_format(iteration + 1 )
__a =AutoConfig.from_pretrained(os.path.join(_snake_case , 'best-checkpoint' ) )
__a =config.idalabel
__a =os.path.join(_snake_case , 'eval_results_best-checkpoint.json' )
__a =os.path.join(_snake_case , 'test_results_best-checkpoint.json' )
assert os.path.exists(_snake_case )
with open(_snake_case , 'r' ) as f:
__a =float(json.load(_snake_case )[args.eval_metric] )
__a =os.path.join(_snake_case , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(_snake_case )
# Loading the dataset from local csv or json files.
__a =load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
__a =load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(_snake_case , exist_ok=_snake_case )
shutil.copy(_snake_case , os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(_snake_case ):
shutil.copy(_snake_case , os.path.join(_snake_case , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
accelerator.wait_for_everyone()
__a =os.path.join(_snake_case , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
__a =eval_result
if best_iteration is None:
__a =new_iteration
__a =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
__a =new_iteration
__a =new_eval_result
__a =0
else:
if new_eval_result == best_eval_result:
__a =new_iteration
__a =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
__a =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , _snake_case )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{iteration}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , _snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_snake_case , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(_snake_case , 'eval_results_best-iteration.json' ) , )
| 308 | 1 |
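The early-stopping bookkeeping in the self-training fragment above is easier to follow with readable names. Below is a minimal sketch of the same logic; the function name, the state-dict layout, and the argument names are assumptions introduced for illustration, not part of the original script.

def update_best(new_iteration, new_eval_result, state, threshold, patience):
    """Return True when self-training should stop early (sketch with assumed names)."""
    if state.get("best_eval_result") is None:
        state["best_iteration"], state["best_eval_result"] = new_iteration, new_eval_result
        state["patience_counter"] = 0
    elif new_eval_result - state["best_eval_result"] > threshold:
        # An improvement beyond the threshold resets the patience counter.
        state["best_iteration"], state["best_eval_result"] = new_iteration, new_eval_result
        state["patience_counter"] = 0
    else:
        if new_eval_result == state["best_eval_result"]:
            state["best_iteration"] = new_iteration
        state["patience_counter"] += 1
    return state["patience_counter"] >= patience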
'''simple docstring'''
import math
def is_prime(number):
    """Return True if the given non-negative integer is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    # It suffices to test odd divisors up to sqrt(number)
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Search from factor * value for the nearest prime (upward by default,
    downward when kwargs["desc"] is True); if the starting point is already
    prime, restart the search from value + 1."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 70 |
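A quick sanity check for the two helpers above (the asserts are illustrative additions, not part of the original row):

assert is_prime(13) and not is_prime(15)
assert next_prime(14) == 17  # 15 and 16 are composite, so the upward search stops at 17
assert next_prime(13) == 17  # 13 is already prime, so the search restarts from 14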
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ : Tuple ={
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int =['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Any =[
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A__ : Any =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70 | 1 |
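The _LazyModule registration above defers the heavy imports until an attribute is first accessed. A stripped-down sketch of the idea (illustrative only, not the actual transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported names to their submodules on first access (simplified sketch)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it,
        # e.g. {"MvpConfig": "configuration_mvp"}.
        self._class_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache the result so __getattr__ is not hit again
        return value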
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))  # one-line quine: %r re-quotes the template into itself, so the program prints its own source
| 367 |
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | 0 |
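Every class above is a machine-generated placeholder that fails loudly when flax is missing. The mechanism boils down to roughly this sketch (simplified: the real requires_backends first checks whether each backend is importable, and FlaxPlaceholder is an invented name):

def requires_backends(obj, backends):
    # Simplified stand-in: pretend none of the requested backends are installed.
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class FlaxPlaceholder:
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])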
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to hold the current combination
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 26 |
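As a quick cross-check (not part of the original row), the recursion above enumerates exactly the r-combinations that itertools produces:

from itertools import combinations

assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10  # C(5, 3) = 10
assert list(combinations([10, 20, 30, 40, 50], 3))[0] == (10, 20, 30)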
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Dict = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 0 |
import os
def solution():
    """Return the maximum top-to-bottom path sum of the triangle stored in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
| 159 |
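Worked example for the dynamic program above, with the classic four-row triangle from Project Euler problem 18 inlined so it runs without triangle.txt:

a = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(a)):
    for j in range(len(a[i])):
        number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
        number2 = a[i - 1][j - 1] if j > 0 else 0
        a[i][j] += max(number1, number2)
assert max(a[-1]) == 23  # best path is 3 -> 7 -> 4 -> 9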
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __snake_case (_a ):
lowerCAmelCase__ = "ibert"
def __init__( self : int , _UpperCAmelCase : Optional[int]=3_0522 , _UpperCAmelCase : Union[str, Any]=768 , _UpperCAmelCase : str=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Any=3072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : str=1E-12 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Any="none" , **_UpperCAmelCase : Optional[int] , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = type_vocab_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : str = position_embedding_type
_lowerCAmelCase : int = quant_mode
_lowerCAmelCase : str = force_dequant
class __snake_case (_a ):
@property
def SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCAmelCase : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 159 | 1 |
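The OnnxConfig subclass above only declares which axes of each input are dynamic for export; for a plain (non-multiple-choice) task its inputs property resolves to the following, shown here as a standalone check:

from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
assert list(inputs) == ["input_ids", "attention_mask"]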
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 112 |
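For instance, _re_identifier defined above pulls the quoted model type out of a mapping entry, and that string is what the blocks are sorted by (the entry string is illustrative):

entry = '        ("albert", "AlbertConfig"),'
assert _re_identifier.search(entry).groups()[0] == "albert"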
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be a positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 | 1 |
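The pair update above is the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2) for stairs climbed one or two steps at a time; a quick check (the asserts are illustrative additions):

assert climb_stairs(3) == 3  # 1+1+1, 1+2, 2+1
assert climb_stairs(5) == 8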
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=10 , lowerCAmelCase__=3 , lowerCAmelCase__=32 * 8 , lowerCAmelCase__=32 * 8 , lowerCAmelCase__=4 , lowerCAmelCase__=64 , ) -> Optional[int]:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_auxiliary_loss
SCREAMING_SNAKE_CASE = num_queries
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_size
SCREAMING_SNAKE_CASE = max_size
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = hidden_dim
SCREAMING_SNAKE_CASE = hidden_dim
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase__ ) > 0.5
).float()
SCREAMING_SNAKE_CASE = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase__ ) > 0.5).long()
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE = self.num_queries
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE = self.num_channels
SCREAMING_SNAKE_CASE = 64
SCREAMING_SNAKE_CASE = 128
SCREAMING_SNAKE_CASE = self.hidden_dim
SCREAMING_SNAKE_CASE = self.hidden_dim
SCREAMING_SNAKE_CASE = self.hidden_dim
return config
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
SCREAMING_SNAKE_CASE = output.encoder_hidden_states
SCREAMING_SNAKE_CASE = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , config.decoder_layers )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Any:
with torch.no_grad():
SCREAMING_SNAKE_CASE = MaskaFormerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
def comm_check_on_output(lowerCAmelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(
pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : str = False
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def __A ( self ) -> str:
self.config_tester.run_common_tests()
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCAmelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __A ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __A ( self ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __A ( self ) -> List[str]:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __A ( self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __A ( self ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __A ( self ) -> Optional[Any]:
pass
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@slow
def __A ( self ) -> Optional[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE = MaskaFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCAmelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCAmelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCAmelCase__ ).long(),
}
SCREAMING_SNAKE_CASE = self.model_tester.get_config()
SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation(lowerCAmelCase__ ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def __A ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss
loss.backward()
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = self.all_model_classes[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
model.train()
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCamelCase = 1E-4
def lowercase () -> str:
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __A ( self ) -> Any:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __A ( self ) -> Union[str, Any]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
# masks_queries_logits
SCREAMING_SNAKE_CASE = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
SCREAMING_SNAKE_CASE = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
SCREAMING_SNAKE_CASE = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = inputs['pixel_values'].to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [el.to(lowerCAmelCase__ ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE = [el.to(lowerCAmelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 38 |
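The gradient-retention test above leans on a general PyTorch idiom: intermediate (non-leaf) tensors do not keep their gradients unless asked to. A minimal standalone sketch:

import torch

x = torch.randn(3, requires_grad=True)
hidden = x * 2          # non-leaf intermediate tensor
hidden.retain_grad()    # without this, hidden.grad stays None after backward()
loss = hidden.sum()
loss.backward(retain_graph=True)
assert hidden.grad is not None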
"""simple docstring"""
class MaxFenwickTree:
    """A Fenwick (binary indexed) tree specialised for range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # per-node maxima over implicit ranges

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers only `index` itself
                self.tree[index] = value
            else:
                # Recompute this node's maximum from the range it covers
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Maximum over the half-open index range [left, right)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 1 |
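Usage sketch for the tree above (updates are 0-indexed, queries are half-open; the calls are illustrative):

ft = MaxFenwickTree(5)
ft.update(2, 7)
ft.update(4, 3)
assert ft.query(0, 5) == 7  # max over indices 0..4
assert ft.query(3, 5) == 3  # max over indices 3..4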
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt from a repo, or return it unchanged if it is a literal prompt."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 51 |
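The whitespace test in download_prompt above is what distinguishes a literal prompt from a repo id; for example:

import re

assert re.search(r"\s", "huggingface-tools/default-prompts") is None   # no space: treated as a repo id
assert re.search(r"\s", "Answer the following question.") is not None  # has spaces: returned as-is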
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 51 | 1 |
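A typical invocation of the conversion script above might look like the following; the file name and all paths are hypothetical examples, not values taken from the original row:

# python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#     --transfo_xl_config_file ./config.json \
#     --pytorch_dump_folder_path ./transfo-xl-pt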
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_A : Tuple = None
_A : Optional[Any] = logging.get_logger(__name__)
_A : Optional[int] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_A : Optional[Any] = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
_A : List[str] = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
_A : Optional[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = ["input_ids", "attention_mask"]
_UpperCAmelCase : List[Any] = NllbTokenizer
_UpperCAmelCase : List[int] = []
_UpperCAmelCase : List[int] = []
def __init__( self : str , A : Any=None , A : int=None , A : Union[str, Any]="<s>" , A : List[Any]="</s>" , A : Union[str, Any]="</s>" , A : Dict="<s>" , A : Optional[Any]="<unk>" , A : int="<pad>" , A : str="<mask>" , A : Optional[int]=None , A : Tuple=None , A : Dict=None , A : Any=False , **A : Optional[Any] , ) ->Tuple:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
lowerCamelCase__ : int = legacy_behaviour
super().__init__(
vocab_file=A , tokenizer_file=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , src_lang=A , tgt_lang=A , additional_special_tokens=A , legacy_behaviour=A , **A , )
lowerCamelCase__ : Optional[Any] = vocab_file
lowerCamelCase__ : Tuple = False if not self.vocab_file else True
lowerCamelCase__ : int = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
lowerCamelCase__ : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase__ : int = src_lang if src_lang is not None else '''eng_Latn'''
lowerCamelCase__ : Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase__ : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCamelCase ( self : Any ) ->str:
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self : int , A : str ) ->None:
lowerCamelCase__ : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCamelCase ( self : Optional[Any] , A : List[int] , A : Optional[List[int]] = None ) ->List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCamelCase ( self : str , A : List[int] , A : Optional[List[int]] = None ) ->List[int]:
lowerCamelCase__ : str = [self.sep_token_id]
lowerCamelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self : List[Any] , A : List[str] , A : str , A : Optional[str] , A : Optional[str] , **A : int ) ->int:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCamelCase__ : List[str] = src_lang
lowerCamelCase__ : Optional[Any] = self(A , add_special_tokens=A , return_tensors=A , **A )
lowerCamelCase__ : int = self.convert_tokens_to_ids(A )
lowerCamelCase__ : Any = tgt_lang_id
return inputs
def __lowerCamelCase ( self : Optional[int] , A : List[str] , A : str = "eng_Latn" , A : Optional[List[str]] = None , A : str = "fra_Latn" , **A : Optional[int] , ) ->BatchEncoding:
lowerCamelCase__ : Dict = src_lang
lowerCamelCase__ : List[str] = tgt_lang
return super().prepare_seqaseq_batch(A , A , **A )
def __lowerCamelCase ( self : Optional[int] ) ->Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCamelCase ( self : Optional[int] ) ->Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCamelCase ( self : List[str] , A : str ) ->None:
lowerCamelCase__ : List[str] = self.convert_tokens_to_ids(A )
if self.legacy_behaviour:
lowerCamelCase__ : Any = []
lowerCamelCase__ : Dict = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : Optional[int] = [self.cur_lang_code]
lowerCamelCase__ : Optional[int] = [self.eos_token_id]
lowerCamelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCamelCase ( self : str , A : str ) ->None:
lowerCamelCase__ : Optional[Any] = self.convert_tokens_to_ids(A )
if self.legacy_behaviour:
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Any = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase__ : List[str] = [self.cur_lang_code]
lowerCamelCase__ : Tuple = [self.eos_token_id]
lowerCamelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase__ : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase__ : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCamelCase ( self : Any , A : str , A : Optional[str] = None ) ->Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(A ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
lowerCamelCase__ : int = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 265 |
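The set_src_lang_special_tokens / set_tgt_lang_special_tokens pair above reduces to wrapping every sequence in a language code and an EOS token. A small sketch of the two layouts (the function and variable names are illustrative):

def wrap_with_lang_code(token_ids, lang_code_id, eos_id, legacy=False):
    if legacy:
        # legacy layout: no prefix, language code after EOS
        return token_ids + [eos_id, lang_code_id]
    # default layout: language code first, EOS last
    return [lang_code_id] + token_ids + [eos_id]

assert wrap_with_lang_code([5, 6], lang_code_id=99, eos_id=2) == [99, 5, 6, 2]
assert wrap_with_lang_code([5, 6], lang_code_id=99, eos_id=2, legacy=True) == [5, 6, 2, 99]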
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def _a ( UpperCAmelCase , UpperCAmelCase="shi-labs/oneformer_demo" ) -> Union[str, Any]:
"""simple docstring"""
with open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) as f:
lowerCamelCase__ : List[Any] = json.load(UpperCAmelCase )
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : str = []
lowerCamelCase__ : Optional[Any] = []
for key, info in class_info.items():
lowerCamelCase__ : Union[str, Any] = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(UpperCAmelCase ) )
lowerCamelCase__ : Optional[int] = thing_ids
lowerCamelCase__ : Dict = class_names
return metadata
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Tuple , A : Tuple , A : List[Any]=7 , A : str=3 , A : List[str]=3_0 , A : Optional[int]=4_0_0 , A : int=None , A : Tuple=True , A : Dict=True , A : Dict=[0.5, 0.5, 0.5] , A : Tuple=[0.5, 0.5, 0.5] , A : int=1_0 , A : List[str]=False , A : Optional[Any]=2_5_5 , A : Union[str, Any]="shi-labs/oneformer_demo" , A : Optional[Any]="ade20k_panoptic.json" , A : str=1_0 , ) ->Dict:
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : List[str] = min_resolution
lowerCamelCase__ : str = max_resolution
lowerCamelCase__ : Optional[Any] = do_resize
lowerCamelCase__ : Any = {'''shortest_edge''': 3_2, '''longest_edge''': 1_3_3_3} if size is None else size
lowerCamelCase__ : str = do_normalize
lowerCamelCase__ : List[str] = image_mean
lowerCamelCase__ : List[str] = image_std
lowerCamelCase__ : Optional[int] = class_info_file
lowerCamelCase__ : Any = prepare_metadata(A , A )
lowerCamelCase__ : str = num_text
lowerCamelCase__ : Dict = repo_path
# for the post_process_functions
lowerCamelCase__ : str = 2
lowerCamelCase__ : Union[str, Any] = 1_0
lowerCamelCase__ : List[Any] = 1_0
lowerCamelCase__ : List[Any] = 3
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : List[Any] = num_labels
lowerCamelCase__ : List[Any] = do_reduce_labels
lowerCamelCase__ : List[Any] = ignore_index
def __lowerCamelCase ( self : str ) ->Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def __lowerCamelCase ( self : List[str] , A : List[Any] , A : Tuple=False ) ->int:
if not batched:
lowerCamelCase__ : List[Any] = image_inputs[0]
if isinstance(A , Image.Image ):
lowerCamelCase__ , lowerCamelCase__ : str = image.size
else:
lowerCamelCase__ , lowerCamelCase__ : str = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__ : Any = int(self.size['''shortest_edge'''] * h / w )
lowerCamelCase__ : Tuple = self.size['''shortest_edge''']
elif w > h:
lowerCamelCase__ : Union[str, Any] = self.size['''shortest_edge''']
lowerCamelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCamelCase__ : Union[str, Any] = self.size['''shortest_edge''']
lowerCamelCase__ : Optional[Any] = self.size['''shortest_edge''']
else:
lowerCamelCase__ : Any = []
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase__ : Optional[Any] = max(A , key=lambda A : item[0] )[0]
lowerCamelCase__ : List[str] = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
def __lowerCamelCase ( self : Optional[int] ) ->List[str]:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Optional[int] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_UpperCAmelCase : Dict = image_processing_class
def __lowerCamelCase ( self : Optional[int] ) ->str:
lowerCamelCase__ : Optional[int] = OneFormerImageProcessorTester(self )
@property
def __lowerCamelCase ( self : List[str] ) ->Any:
return self.image_processing_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : int ) ->Tuple:
lowerCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
self.assertTrue(hasattr(A , '''ignore_index''' ) )
self.assertTrue(hasattr(A , '''class_info_file''' ) )
self.assertTrue(hasattr(A , '''num_text''' ) )
self.assertTrue(hasattr(A , '''repo_path''' ) )
self.assertTrue(hasattr(A , '''metadata''' ) )
self.assertTrue(hasattr(A , '''do_reduce_labels''' ) )
def __lowerCamelCase ( self : Any ) ->Tuple:
pass
def __lowerCamelCase ( self : Optional[Any] ) ->Optional[int]:
# Initialize image_processor
lowerCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase__ : Any = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.image_processing_tester.get_expected_values(A , batched=A )
lowerCamelCase__ : List[str] = image_processor(
A , ['''semantic'''] * len(A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : Tuple ) ->Tuple:
# Initialize image_processor
lowerCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Dict = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processing_tester.get_expected_values(A , batched=A )
lowerCamelCase__ : int = image_processor(
A , ['''semantic'''] * len(A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : int ) ->str:
# Initialize image_processor
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase__ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : int = self.image_processing_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.image_processing_tester.get_expected_values(A , batched=A )
lowerCamelCase__ : Any = image_processor(
A , ['''semantic'''] * len(A ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : Dict , A : Tuple=False , A : Dict=False , A : Optional[Any]="np" ) ->List[str]:
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
lowerCamelCase__ : Any = self.image_processing_tester.num_labels
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : Any = None
lowerCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
if with_segmentation_maps:
lowerCamelCase__ : int = num_labels
if is_instance_map:
lowerCamelCase__ : str = list(range(A ) ) * 2
lowerCamelCase__ : Union[str, Any] = dict(enumerate(A ) )
lowerCamelCase__ : int = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
lowerCamelCase__ : Any = [Image.fromarray(A ) for annotation in annotations]
lowerCamelCase__ : int = image_processor(
A , ['''semantic'''] * len(A ) , A , return_tensors='''pt''' , instance_id_to_semantic_id=A , pad_and_return_pixel_mask=A , )
return inputs
def __lowerCamelCase ( self : Dict ) ->Optional[Any]:
pass
def __lowerCamelCase ( self : Union[str, Any] ) ->Any:
def common(A : Dict=False , A : Tuple=None ):
lowerCamelCase__ : str = self.comm_get_image_processor_inputs(
with_segmentation_maps=A , is_instance_map=A , segmentation_type=A )
lowerCamelCase__ : Union[str, Any] = inputs['''mask_labels''']
lowerCamelCase__ : List[Any] = inputs['''class_labels''']
lowerCamelCase__ : List[str] = inputs['''pixel_values''']
lowerCamelCase__ : Union[str, Any] = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(A , A , A ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(A ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=A )
common(is_instance_map=A , segmentation_type='''pil''' )
common(is_instance_map=A , segmentation_type='''pil''' )
def __lowerCamelCase ( self : Any ) ->Optional[int]:
lowerCamelCase__ : List[Any] = np.zeros((2_0, 5_0) )
lowerCamelCase__ : Any = 1
lowerCamelCase__ : Union[str, Any] = 1
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : str = binary_mask_to_rle(A )
self.assertEqual(len(A ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
def __lowerCamelCase ( self : int ) ->Dict:
lowerCamelCase__ : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
lowerCamelCase__ : str = self.image_processing_tester.get_fake_oneformer_outputs()
lowerCamelCase__ : List[Any] = image_processor.post_process_semantic_segmentation(A )
self.assertEqual(len(A ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
lowerCamelCase__ : Any = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
lowerCamelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(A , target_sizes=A )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def __lowerCamelCase ( self : Tuple ) ->Tuple:
lowerCamelCase__ : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
lowerCamelCase__ : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
lowerCamelCase__ : Union[str, Any] = image_processor.post_process_instance_segmentation(A , threshold=0 )
self.assertTrue(len(A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def __lowerCamelCase ( self : str ) ->Dict:
lowerCamelCase__ : Union[str, Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
lowerCamelCase__ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
lowerCamelCase__ : List[str] = image_processor.post_process_panoptic_segmentation(A , threshold=0 )
self.assertTrue(len(A ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , A )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
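# --- Added sketch (an assumption, not the library's implementation): the
# --- `binary_mask_to_rle` helper tested above is not defined in this snippet.
# --- One common run-length encoding over the flattened binary mask looks like this:
import numpy as np

def binary_mask_to_rle_sketch(mask: np.ndarray) -> list:
    # Pad with zeros so every run of ones has a well-defined start and end.
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    # 1-based positions where the value flips mark run boundaries.
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    # Turn absolute end positions into run lengths: [start, length, start, length, ...].
    runs[1::2] -= runs[::2]
    return list(runs)

m = np.zeros(10)
m[2:5] = 1
assert binary_mask_to_rle_sketch(m) == [3, 3]  # run of ones starts at 1-based index 3, length 3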
| 265 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( lowerCamelCase__ ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Union[str, Any]:
A__ = feature_size
A__ = sampling_rate
A__ = padding_value
A__ = kwargs.pop('padding_side' ,'right' )
A__ = kwargs.pop('return_attention_mask' ,lowerCAmelCase__ )
super().__init__(**lowerCAmelCase__ )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = False ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,) -> Optional[int]:
if isinstance(lowerCAmelCase__ ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
A__ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
A__ = processed_features[self.model_input_names[0]]
A__ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCAmelCase__ ) == 0:
if return_attention_mask:
A__ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A__ = required_input[0]
if isinstance(lowerCAmelCase__ ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A__ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCAmelCase__ ):
A__ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCAmelCase__ ):
A__ = """tf"""
elif is_torch_tensor(lowerCAmelCase__ ):
A__ = """pt"""
elif isinstance(lowerCAmelCase__ ,(int, float, list, tuple, np.ndarray) ):
A__ = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(lowerCAmelCase__ )}. '''
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
A__ = to_numpy(lowerCAmelCase__ )
else:
A__ = [to_numpy(lowerCAmelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A__ = self._get_padding_strategies(padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ )
A__ = processed_features[self.model_input_names[0]]
A__ = len(lowerCAmelCase__ )
if not all(len(lowerCAmelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A__ = []
for i in range(lowerCAmelCase__ ):
A__ = {k: v[i] for k, v in processed_features.items()}
# truncation
A__ = self._truncate(
lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,)
truncated_inputs.append(lowerCAmelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A__ = PaddingStrategy.MAX_LENGTH
A__ = {}
for i in range(lowerCAmelCase__ ):
# padding
A__ = self._pad(
truncated_inputs[i] ,max_length=lowerCAmelCase__ ,padding_strategy=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,)
for key, value in outputs.items():
if key not in batch_outputs:
A__ = []
if value.dtype is np.dtype(np.float64 ):
A__ = value.astype(np.float32 )
batch_outputs[key].append(lowerCAmelCase__ )
return BatchFeature(lowerCAmelCase__ ,tensor_type=lowerCAmelCase__ )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = PaddingStrategy.DO_NOT_PAD ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,) -> str:
A__ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A__ = len(lowerCAmelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCAmelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A__ = np.ones(len(lowerCAmelCase__ ) ,dtype=np.int32 )
if needs_to_be_padded:
A__ = max_length - len(lowerCAmelCase__ )
if self.padding_side == "right":
if return_attention_mask:
A__ = np.pad(
processed_features['attention_mask'] ,(0, difference) )
A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A__ = np.pad(
lowerCAmelCase__ ,lowerCAmelCase__ ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A__ = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A__ = np.pad(
lowerCAmelCase__ ,lowerCAmelCase__ ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,) -> Optional[Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A__ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A__ = len(lowerCAmelCase__ ) > max_length
if needs_to_be_truncated:
A__ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A__ = processed_features["""attention_mask"""][:max_length]
return processed_features
def snake_case__ ( self ,__UpperCAmelCase=False ,__UpperCAmelCase=None ) -> List[str]:
if padding is not False:
if padding is True:
A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ = PaddingStrategy(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ = padding
else:
A__ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
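# --- Added sketch (not part of the source): the right-side padding arithmetic
# --- used in `_pad` above, reduced to plain numpy for the feature_size == 1 case.
import numpy as np

def pad_right(values: np.ndarray, max_length: int, padding_value: float = 0.0):
    difference = max_length - len(values)
    # Ones over the real samples, zeros over the padding.
    attention_mask = np.pad(np.ones(len(values), dtype=np.int32), (0, difference))
    padded = np.pad(values, (0, difference), "constant", constant_values=padding_value)
    return padded, attention_mask

padded, mask = pad_right(np.array([0.1, 0.2, 0.3]), max_length=5)
# padded -> [0.1, 0.2, 0.3, 0.0, 0.0]; mask -> [1, 1, 1, 0, 0]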
| 221 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
UpperCamelCase__ : Dict = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowerCAmelCase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: Optional[int]=None ):
require_version(deps[pkg] , _lowerCamelCase )
| 112 | 0 |
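# --- Added sketch (an assumption about the mechanics, not the library's
# --- internals): a version pin such as ">=4.0.0" can be checked with
# --- `packaging`, itself one of the runtime dependencies listed above.
from packaging import version

def satisfies(installed: str, pin: str) -> bool:
    # Sketch handles only a single ">=x.y.z" pin for illustration.
    assert pin.startswith(">="), "sketch only handles >= pins"
    return version.parse(installed) >= version.parse(pin[2:])

assert satisfies("4.31.0", ">=4.0.0")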
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _a :
SCREAMING_SNAKE_CASE_ : List[str]
SCREAMING_SNAKE_CASE_ : Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE_ : ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE_ : ClassVar[Any] = None
SCREAMING_SNAKE_CASE_ : str = field(default="""Translation""" , init=lowerCamelCase_ , repr=lowerCamelCase_ )
def __call__( self ) -> str:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _lowercase ( self ) -> Any:
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class _a :
SCREAMING_SNAKE_CASE_ : Optional[List] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE_ : ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE_ : ClassVar[Any] = None
SCREAMING_SNAKE_CASE_ : str = field(default="""TranslationVariableLanguages""" , init=lowerCamelCase_ , repr=lowerCamelCase_ )
def _lowercase ( self ) -> Union[str, Any]:
_snake_case = sorted(set(self.languages ) ) if self.languages else None
_snake_case = len(self.languages ) if self.languages else None
def __call__( self ) -> Dict:
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
_snake_case = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
f"""Some languages in example ({', '.join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_snake_case = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_snake_case , _snake_case = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def _lowercase ( self ) -> Any:
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
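# --- Added usage sketch (not part of the source) of the flattening logic in
# --- `TranslationVariableLanguages` above: one language may carry several
# --- translations, and the output columns are sorted by (language, text).
translation_dict = {"en": "the cat", "fr": ["le chat", "la chatte"]}
pairs = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        pairs.append((lang, text))
    else:
        pairs.extend((lang, el) for el in text)
languages, translations = zip(*sorted(pairs))
assert languages == ("en", "fr", "fr")
assert translations == ("the cat", "la chatte", "le chat")  # tuples sort by (lang, text)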
| 367 |
'''simple docstring'''
from manim import *
class _a ( __lowerCAmelCase ):
def _lowercase ( self ) -> Optional[int]:
_snake_case = Rectangle(height=0.5 ,width=0.5 )
_snake_case = Rectangle(height=0.4_6 ,width=0.4_6 ).set_stroke(width=0 )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = VGroup(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = Text("CPU" ,font_size=24 )
_snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_snake_case = [mem.copy() for i in range(4 )]
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = Text("GPU" ,font_size=24 )
_snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = Text("Model" ,font_size=24 )
_snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0.5 ,aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(_SCREAMING_SNAKE_CASE )
_snake_case = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
rect.set_stroke(_SCREAMING_SNAKE_CASE )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case = Rectangle(height=0.4_6 / 4 ,width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.0_2 ,direction=_SCREAMING_SNAKE_CASE )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] ,direction=_SCREAMING_SNAKE_CASE ,buff=0.0 )
self.add(_SCREAMING_SNAKE_CASE )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
_snake_case = [mem.copy() for i in range(6 )]
_snake_case = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,buff=0 )
_snake_case = Text("Loaded Checkpoint" ,font_size=24 )
_snake_case = Group(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE ,aligned_edge=_SCREAMING_SNAKE_CASE ,buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(_SCREAMING_SNAKE_CASE ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
_snake_case = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE ) ,Write(_SCREAMING_SNAKE_CASE ) )
self.play(Write(_SCREAMING_SNAKE_CASE ,run_time=1 ) ,Create(_SCREAMING_SNAKE_CASE ,run_time=1 ) )
_snake_case = []
_snake_case = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
_snake_case = fill.copy().set_fill(_SCREAMING_SNAKE_CASE ,opacity=0.7 )
target.move_to(_SCREAMING_SNAKE_CASE )
first_animations.append(GrowFromCenter(_SCREAMING_SNAKE_CASE ,run_time=1 ) )
_snake_case = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE ,run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait()
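# --- Added note (hypothetical usage, not part of the source): a scene like the
# --- one above can be rendered programmatically with Manim Community's
# --- `tempconfig`; the class name `_a` follows the obfuscated definition above.
from manim import tempconfig

with tempconfig({"quality": "low_quality"}):
    _a().render()  # writes a preview-quality video under ./media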
| 142 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( _lowercase , unittest.TestCase):
snake_case__ = CTRLTokenizer
snake_case__ = False
snake_case__ = False
def _UpperCamelCase ( self : str ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
_UpperCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_UpperCamelCase = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
_UpperCamelCase = {'unk_token': '<unk>'}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase__ ) )
def _UpperCamelCase ( self : List[str] , **__UpperCamelCase : Union[str, Any] ) -> Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str ) -> str:
_UpperCamelCase = 'adapt react readapt apt'
_UpperCamelCase = 'adapt react readapt apt'
return input_text, output_text
def _UpperCamelCase ( self : str ) -> Union[str, Any]:
_UpperCamelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase = 'adapt react readapt apt'
_UpperCamelCase = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
_UpperCamelCase = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_UpperCamelCase = tokens + [tokenizer.unk_token]
_UpperCamelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
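# --- Added sketch (mirrors, but is not, the CTRLTokenizer implementation):
# --- greedy BPE merging that reproduces the expectation above, assuming merge
# --- priority equals line order in the merges file.
def bpe_sketch(word, merges):
    ranks = {pair: i for i, pair in enumerate(merges)}
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == best:
                merged.append(best[0] + best[1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

merges = [("a", "p"), ("ap", "t</w>"), ("r", "e"), ("a", "d"), ("ad", "apt</w>")]
assert bpe_sketch("adapt", merges) == ["adapt</w>"]
assert bpe_sketch("react", merges) == ["re", "a", "c", "t</w>"]  # rendered as 're@@ a@@ c@@ t'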
| 256 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case_ : Tuple = random.Random()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_=1.0 , UpperCAmelCase_=None , UpperCAmelCase_=None ):
if rng is None:
_UpperCamelCase : Dict = global_rng
_UpperCamelCase : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase ):
def __init__( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=7 ,lowerCamelCase__ : str=400 ,lowerCamelCase__ : int=2000 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Union[str, Any]=16000 ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Optional[int]=True ,):
'''simple docstring'''
_UpperCamelCase : Optional[int] = parent
_UpperCamelCase : Union[str, Any] = batch_size
_UpperCamelCase : List[str] = min_seq_length
_UpperCamelCase : Optional[int] = max_seq_length
_UpperCamelCase : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCamelCase : List[str] = feature_size
_UpperCamelCase : List[str] = padding_value
_UpperCamelCase : List[Any] = sampling_rate
_UpperCamelCase : Dict = return_attention_mask
_UpperCamelCase : Tuple = do_normalize
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Tuple=False ):
'''simple docstring'''
def _flatten(lowerCamelCase__ : Optional[Any] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_UpperCamelCase : Optional[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_UpperCamelCase : Any = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
_UpperCamelCase : int = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = WavaVecaFeatureExtractor
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : List[str] = WavaVecaFeatureExtractionTester(self )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowerCamelCase__ ,axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ,axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCamelCase : int = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Tuple = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCamelCase : Tuple = feat_extract(speech_inputs[0] ,return_tensors='np' ).input_values
_UpperCamelCase : Any = feat_extract(np_speech_inputs[0] ,return_tensors='np' ).input_values
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
# Test batched
_UpperCamelCase : Union[str, Any] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
_UpperCamelCase : Optional[int] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCamelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCamelCase : str = np.asarray(lowerCamelCase__ )
_UpperCamelCase : List[str] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
_UpperCamelCase : int = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
_UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = feat_extract(lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='np' )
_UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : List[str] = range(800 ,1400 ,200 )
_UpperCamelCase : List[str] = [floats_list((1, x) )[0] for x in lengths]
_UpperCamelCase : Optional[Any] = ['longest', 'max_length', 'do_not_pad']
_UpperCamelCase : str = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : List[str] = feat_extract(lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding=lowerCamelCase__ )
_UpperCamelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Union[str, Any] = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=1000 ,padding='max_length' ,return_tensors='np' )
_UpperCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : int = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=1000 ,padding='longest' ,return_tensors='np' )
_UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Any = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=2000 ,padding='longest' ,return_tensors='np' )
_UpperCamelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
import torch
_UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = np.random.rand(100 ).astype(np.float64 )
_UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCamelCase : Optional[int] = feature_extractor.pad([{'input_values': inputs}] ,return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
_UpperCamelCase : Tuple = feature_extractor.pad([{'input_values': inputs}] ,return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
@slow
@require_torch
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_UpperCamelCase : Optional[int] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask ,config.feat_extract_norm == 'layer' )
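# --- Added sketch (not part of the source): the property that
# --- `_check_zero_mean_unit_variance` above asserts; the 1e-7 epsilon is an
# --- assumed numerical-safety constant, not necessarily the extractor's.
import numpy as np

def zero_mean_unit_var(x: np.ndarray) -> np.ndarray:
    # Center and scale so mean ~ 0 and variance ~ 1 along the sequence.
    return (x - x.mean()) / np.sqrt(x.var() + 1e-7)

x = np.random.rand(800).astype(np.float32)
y = zero_mean_unit_var(x)
assert abs(y.mean()) < 1e-3 and abs(y.var() - 1) < 1e-3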
| 83 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , UpperCAmelCase__ )
UpperCamelCase_: Tuple = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCamelCase_: Dict = dataset_size < in_memory_max_size
else:
UpperCamelCase_: str = False
UpperCamelCase_: List[Any] = is_small_dataset(UpperCAmelCase__ )
assert result == expected
| 358 |
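# --- Added sketch (restates the expected-value logic checked by the test
# --- above; the real helper lives in `datasets.utils.info_utils`).
def is_small_dataset_sketch(dataset_size, in_memory_max_size) -> bool:
    # Small only when both values are truthy and the dataset fits under the cap.
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False

assert is_small_dataset_sketch(400 * 2**20, 900 * 2**20) is True
assert is_small_dataset_sketch(None, 900 * 2**20) is False
assert is_small_dataset_sketch(600 * 2**20, 0) is False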
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def _a ( _lowerCamelCase ):
raise NotImplementedError()
@abstractmethod
def _a ( self ):
raise NotImplementedError()
| 292 | 0 |
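# --- Added illustration (hypothetical; the base class name above is obfuscated,
# --- so this standalone command shows the intended shape rather than subclassing it).
from argparse import ArgumentParser

class EchoCommand:
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--text", default="hello")

    def __init__(self, text: str):
        self.text = text

    def run(self):
        print(self.text)

parser = ArgumentParser()
EchoCommand.register_subcommand(parser)
args = parser.parse_args(["--text", "hi"])
EchoCommand(args.text).run()  # prints: hi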
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
UpperCamelCase__ : Dict = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
_A : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_A : Optional[str] = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_A : Optional[str] = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_A : Optional[str] = field(
default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_A : bool = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
_A : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_A : bool = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
_A : Optional[str] = field(default=lowerCamelCase__ , metadata={'''help''': '''The input training data file (a text file).'''} )
_A : Optional[str] = field(
default=lowerCamelCase__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_A : bool = field(
default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_A : Optional[int] = field(
default=lowerCamelCase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
_A : Optional[int] = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_A : bool = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
_A : Optional[int] = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_A : Optional[int] = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def UpperCamelCase__ ( self : Tuple ):
"""simple docstring"""
if self.train_file is not None:
__SCREAMING_SNAKE_CASE : int = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__SCREAMING_SNAKE_CASE : List[str] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
_A : PreTrainedTokenizerBase
_A : Union[bool, str, PaddingStrategy] = True
_A : Optional[int] = None
_A : Optional[int] = None
def __call__( self : List[Any] , lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = """label""" if """label""" in features[0].keys() else """labels"""
__SCREAMING_SNAKE_CASE : Tuple = [feature.pop(lowerCAmelCase__ ) for feature in features]
__SCREAMING_SNAKE_CASE : Tuple = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = len(features[0]["""input_ids"""] )
__SCREAMING_SNAKE_CASE : int = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features
]
__SCREAMING_SNAKE_CASE : List[str] = list(chain(*lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Dict = self.tokenizer.pad(
lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
__SCREAMING_SNAKE_CASE : List[str] = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
__SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.intaa )
return batch
def lowerCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
datasets.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__SCREAMING_SNAKE_CASE : List[Any] = {}
if data_args.train_file is not None:
__SCREAMING_SNAKE_CASE : List[Any] = data_args.train_file
if data_args.validation_file is not None:
__SCREAMING_SNAKE_CASE : str = data_args.validation_file
__SCREAMING_SNAKE_CASE : Tuple = data_args.train_file.split(""".""" )[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(
_lowerCamelCase , data_files=_lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__SCREAMING_SNAKE_CASE : Dict = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__SCREAMING_SNAKE_CASE : Tuple = [F"ending{i}" for i in range(4 )]
__SCREAMING_SNAKE_CASE : Union[str, Any] = """sent1"""
__SCREAMING_SNAKE_CASE : Optional[Any] = """sent2"""
if data_args.max_seq_length is None:
__SCREAMING_SNAKE_CASE : str = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
__SCREAMING_SNAKE_CASE : int = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
__SCREAMING_SNAKE_CASE : Any = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowerCamelCase: Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Optional[Any] = [[context] * 4 for context in examples[context_name]]
__SCREAMING_SNAKE_CASE : Optional[int] = examples[question_header_name]
__SCREAMING_SNAKE_CASE : str = [
[F"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(_lowerCamelCase )
]
# Flatten out
__SCREAMING_SNAKE_CASE : Any = list(chain(*_lowerCamelCase ) )
__SCREAMING_SNAKE_CASE : Dict = list(chain(*_lowerCamelCase ) )
# Tokenize
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(
_lowerCamelCase , _lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE : Optional[int] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE : str = min(len(_lowerCamelCase ) , data_args.max_train_samples )
__SCREAMING_SNAKE_CASE : Optional[Any] = train_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE : List[Any] = train_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE : List[Any] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE : Dict = min(len(_lowerCamelCase ) , data_args.max_eval_samples )
__SCREAMING_SNAKE_CASE : str = eval_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE : Tuple = eval_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__SCREAMING_SNAKE_CASE : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowerCamelCase: Tuple ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = eval_predictions
__SCREAMING_SNAKE_CASE : str = np.argmax(_lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__SCREAMING_SNAKE_CASE : Optional[Any] = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE : List[str] = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE : int = last_checkpoint
__SCREAMING_SNAKE_CASE : List[Any] = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__SCREAMING_SNAKE_CASE : Optional[int] = train_result.metrics
__SCREAMING_SNAKE_CASE : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase )
)
__SCREAMING_SNAKE_CASE : Optional[int] = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics("""train""" , _lowerCamelCase )
trainer.save_metrics("""train""" , _lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE : Optional[int] = trainer.evaluate()
__SCREAMING_SNAKE_CASE : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics("""eval""" , _lowerCamelCase )
trainer.save_metrics("""eval""" , _lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCamelCase )
else:
trainer.create_model_card(**_lowerCamelCase )
def lowerCAmelCase_ ( _lowerCamelCase: Tuple ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 112 |
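# --- Added sketch (not part of the source): the flatten -> pad -> un-flatten
# --- shape transformation that `DataCollatorForMultipleChoice` above performs.
import torch

batch_size, num_choices, seq_len = 2, 4, 7
# The tokenizer pads batch_size * num_choices flattened sequences...
flat = torch.zeros(batch_size * num_choices, seq_len, dtype=torch.long)
# ...and the collator views them back into (batch, choices, seq_len).
unflat = flat.view(batch_size, num_choices, -1)
assert unflat.shape == (batch_size, num_choices, seq_len)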
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = [
(10_00, '''M'''),
(9_00, '''CM'''),
(5_00, '''D'''),
(4_00, '''CD'''),
(1_00, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def lowerCAmelCase_ ( _lowerCamelCase: str ):
__SCREAMING_SNAKE_CASE : List[Any] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00}
__SCREAMING_SNAKE_CASE : Tuple = 0
__SCREAMING_SNAKE_CASE : str = 0
while place < len(_lowerCamelCase ):
if (place + 1 < len(_lowerCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def lowerCAmelCase_ ( _lowerCamelCase: int ):
__SCREAMING_SNAKE_CASE : Any = []
for arabic, roman in ROMAN:
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : str = divmod(_lowerCamelCase , _lowerCamelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 | 1 |
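# --- Added sketch (restates the subtractive-notation parsing loop above with
# --- readable names; not a new algorithm).
def parse_roman(s: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total, place = 0, 0
    while place < len(s):
        # A smaller value before a larger one is subtractive (e.g. IV = 4).
        if place + 1 < len(s) and vals[s[place]] < vals[s[place + 1]]:
            total += vals[s[place + 1]] - vals[s[place]]
            place += 2
        else:
            total += vals[s[place]]
            place += 1
    return total

assert parse_roman("MCMXCIV") == 1994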
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :List[Any] , snake_case :pyspark.sql.DataFrame , snake_case :Optional[NamedSplit] = None , snake_case :Optional[Features] = None , snake_case :bool = True , snake_case :str = None , snake_case :bool = False , snake_case :str = None , snake_case :bool = True , snake_case :str = "arrow" , **snake_case :Optional[int] , ):
'''simple docstring'''
super().__init__(
split=snake_case , features=snake_case , cache_dir=snake_case , keep_in_memory=snake_case , streaming=snake_case , **snake_case , )
A_ : Dict = load_from_cache_file
A_ : Optional[Any] = file_format
A_ : str = Spark(
df=snake_case , features=snake_case , cache_dir=snake_case , working_dir=snake_case , **snake_case , )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
A_ : Dict = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=snake_case , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
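# --- Added usage sketch (hypothetical; `Dataset.from_spark` is the public
# --- entry point that wraps this reader in recent `datasets` releases, and
# --- it assumes a running Spark session).
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])
ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed dataset
print(ds[0])  # {'text': 'hello'}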
| 70 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_lowerCAmelCase : Optional[Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
ZERO2 = '''zero2'''
ZERO3 = '''zero3'''
stages = [ZERO2, ZERO3]
def custom_name_func ( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
    @parameterized.expand(params , name_func=custom_name_func )
def SCREAMING_SNAKE_CASE ( self :str , snake_case :Tuple , snake_case :Tuple ):
'''simple docstring'''
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Tuple , snake_case :Optional[Any] ):
'''simple docstring'''
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
    @parameterized.expand(params , name_func=custom_name_func )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Any ):
'''simple docstring'''
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :str , snake_case :Tuple ):
'''simple docstring'''
self.run_and_check(
stage=snake_case , model=snake_case , distributed=snake_case , fpaa=snake_case , )
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :int ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :str , snake_case :str , snake_case :int = 10 , snake_case :bool = True , snake_case :bool = True , snake_case :bool = True , ):
'''simple docstring'''
A_ : Any = models[model]
A_ : List[Any] = self.run_trainer(
stage=snake_case , model_name=snake_case , eval_steps=snake_case , num_train_epochs=1 , distributed=snake_case , fpaa=snake_case , )
self.do_checks(snake_case )
return output_dir
def SCREAMING_SNAKE_CASE ( self :str , snake_case :str , snake_case :str , snake_case :int = 10 , snake_case :int = 1 , snake_case :bool = True , snake_case :bool = True , ):
'''simple docstring'''
A_ : List[Any] = self.get_auto_remove_tmp_dir("./xxx" , after=snake_case )
A_ : Tuple = f"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(snake_case )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
A_ : List[str] = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
A_ : List[str] = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
A_ : str = self.get_launcher(snake_case )
A_ : Dict = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(snake_case , env=self.get_env() )
return output_dir
    def get_launcher( self , distributed=False ):
        '''simple docstring'''
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 70 | 1 |
'''simple docstring'''
from __future__ import annotations
def kmp ( pattern: str , text: str ) -> bool:
    """simple docstring"""
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0 # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array ( pattern: str ) -> list[int]:
    """simple docstring"""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = '''abc1abc12'''
    texta = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    textb = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, texta) and not kmp(pattern, textb)
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert kmp(pattern, text)
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert kmp(pattern, text)
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)
    # Test 5)
    pattern = '''aabaabaaa'''
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
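    # Test 6) cross-check against Python's built-in substring search on random
    # inputs; a minimal sketch assuming the kmp / get_failure_array names
    # reconstructed above.
    import random
    for _ in range(100):
        rnd_text = "".join(random.choice("ab") for _ in range(30))
        rnd_pattern = "".join(random.choice("ab") for _ in range(3))
        assert kmp(rnd_pattern, rnd_text) == (rnd_pattern in rnd_text)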
| 70 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark ( key: str ):
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += [key]
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
def mark_multiple ( *keys: List[str] ):
    def decorator(func ):
        handle = getattr(func , """handle_key""" , [] )
        handle += keys
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
class KeyHandler ( type ):
    '''simple docstring'''
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , """key_handler""" ):
            setattr(new_cls , """key_handler""" , {} )
        setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register ( cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
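# A minimal usage sketch: with mark/register reconstructed above, decorated
# methods are collected into the class-level key_handler table. The _Menu
# class here is hypothetical and only demonstrates the registration wiring;
# no keypress is read.
class _Menu:
    @mark("j")
    def move_down(cls):
        return "down"
_Menu = register(_Menu)
assert "j" in _Menu.key_handler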
| 58 | 0 |
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None) | 352 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class a ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : Any ) -> int:
__UpperCAmelCase : Optional[Any] = find_backend(''' if not is_torch_available():''' )
self.assertEqual(snake_case , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__UpperCAmelCase : Union[str, Any] = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(snake_case , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__UpperCAmelCase : List[str] = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(snake_case , '''torch_and_transformers_and_onnx''' )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
        objects = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , snake_case )
self.assertIn('''torch_and_transformers''' , snake_case )
self.assertIn('''flax_and_transformers''' , snake_case )
self.assertIn('''torch_and_transformers_and_onnx''' , snake_case )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase : str = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(snake_case , '''\nCONSTANT = None\n''' )
__UpperCAmelCase : Union[str, Any] = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
snake_case , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__UpperCAmelCase : Optional[int] = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
__UpperCAmelCase : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(snake_case , snake_case )
def lowerCamelCase__ ( self : int ) -> List[Any]:
__UpperCAmelCase : List[str] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , snake_case ) | 240 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def UpperCAmelCase__ ( self : int , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Optional[int]=None , A_ : Optional[Any]=True , A_ : Optional[int]=False):
if rouge_types is None:
lowerCAmelCase_ : Dict = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
lowerCAmelCase_ : List[str] = rouge_scorer.RougeScorer(rouge_types=A_ , use_stemmer=A_)
if use_aggregator:
lowerCAmelCase_ : List[Any] = scoring.BootstrapAggregator()
else:
lowerCAmelCase_ : int = []
for ref, pred in zip(A_ , A_):
lowerCAmelCase_ : List[Any] = scorer.score(A_ , A_)
if use_aggregator:
aggregator.add_scores(A_)
else:
scores.append(A_)
if use_aggregator:
lowerCAmelCase_ : Optional[int] = aggregator.aggregate()
else:
lowerCAmelCase_ : Union[str, Any] = {}
for key in scores[0]:
lowerCAmelCase_ : List[Any] = [score[key] for score in scores]
return result
| 103 |
from __future__ import annotations
def all_construct( target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]] # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
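    # A hedged sketch of the counting variant of the same table-based DP:
    # count_construct (a hypothetical helper, not part of the module above)
    # returns how many ways target can be built instead of listing them.
    def count_construct(target: str, word_bank: list[str]) -> int:
        table = [0] * (len(target) + 1)
        table[0] = 1 # one way to build the empty string
        for i in range(len(target) + 1):
            if table[i]:
                for word in word_bank:
                    if target[i : i + len(word)] == word:
                        table[i + len(word)] += table[i]
        return table[len(target)]
    assert count_construct('''purple''', ['''purp''', '''p''', '''ur''', '''le''', '''purpl''']) == 2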
| 326 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
def _lowerCAmelCase( self ) -> Optional[int]:
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=UpperCAmelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCAmelCase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Any = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=UpperCAmelCase_ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : Any = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase_ )
lowercase__ : int = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowercase__ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowercase__ : Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase__ : Optional[int] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase_ , layers_per_block=1 , upcast_attention=UpperCAmelCase_ , use_linear_projection=UpperCAmelCase_ , )
torch.manual_seed(0 )
lowercase__ : Optional[Any] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
        components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Dict:
if str(UpperCAmelCase_ ).startswith('''mps''' ):
lowercase__ : Union[str, Any] = torch.manual_seed(UpperCAmelCase_ )
else:
lowercase__ : Dict = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Any = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase_ )
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : List[Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase_ )
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase( self ) -> Tuple:
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe('''anime turle''' , generator=generator , output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
def _lowerCAmelCase( self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 365 | '''simple docstring'''
import qiskit
def single_qubit_measure ( qubits: int , classical_bits: int ):
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
| 214 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig ( PretrainedConfig ):
    model_type = """data2vec-text"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig ( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
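# A quick usage sketch of the config class above (hedged; the values are
# arbitrary): keyword arguments map onto the reconstructed __init__ fields,
# and model_type identifies the architecture.
cfg = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4)
print(cfg.model_type, cfg.hidden_size, cfg.num_hidden_layers)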
| 168 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 168 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
    _import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit'''] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit'''] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vit'''] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 317 |
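# The module above follows transformers' lazy-import pattern: submodules are
# imported only when one of their exported names is first accessed. A minimal
# standalone sketch of the same idea (_LazyNamespace is a hypothetical
# stand-in, not the real _LazyModule):
import importlib

class _LazyNamespace:
    def __init__(self, package: str, structure: dict):
        self._package = package
        # map each exported name to the submodule that defines it
        self._where = {name: mod for mod, names in structure.items() for name in names}
    def __getattr__(self, item: str):
        if item not in self._where:
            raise AttributeError(item)
        module = importlib.import_module("." + self._where[item], self._package)
        return getattr(module, item)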
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers ( max_number: int ) -> list[int]:
    """simple docstring"""
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution ( max_number: int = 10**8 ) -> int:
    """simple docstring"""
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }') | 317 | 1 |
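# A brute-force cross-check on a small bound; a hedged sketch assuming the
# reconstructed names calculate_prime_numbers / solution above. _omega is a
# hypothetical helper counting prime factors with multiplicity; a semiprime
# has exactly two.
def _omega(n: int) -> int:
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    return count + (1 if n > 1 else 0)

assert solution(100) == sum(1 for n in range(4, 100) if _omega(n) == 2)  # both give 34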
def binary_recursive ( decimal: int ) -> str:
    """simple docstring"""
    decimal = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main ( number: str ) -> str:
    """simple docstring"""
    number = str(number ).strip()
    if not number:
        raise ValueError('''No input value was provided''' )
    negative = '''-''' if number.startswith('''-''' ) else ''''''
    number = number.lstrip('''-''' )
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''' )
    return f"""{negative}0b{binary_recursive(int(number ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
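    # Quick cross-check against Python's built-in bin(); a minimal sketch
    # assuming the reconstructed names binary_recursive / main above.
    for n in (0, 1, 5, 37, 255, -12):
        assert main(str(n)) == bin(n)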
| 337 |
from __future__ import annotations
def average ( nums: list[float] ) -> float:
    """simple docstring"""
    if not nums:
        raise ValueError('''List is empty''' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337 | 1 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies ( url: str = "" ) -> dict[str, float]:
    '''simple docstring'''
    base_url = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
    soup = BeautifulSoup(requests.get(base_url ).text , """html.parser""" )
    titles = soup.find_all("""td""" , attrs="""titleColumn""" )
    ratings = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies ( filename: str = "IMDb_Top_250_Movies.csv" ) -> None:
    '''simple docstring'''
    movies = get_imdb_top_250_movies()
    with open(filename , """w""" , newline="""""" ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(["""Movie title""", """IMDb rating"""] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
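    # Offline sketch of the same parsing logic on a static HTML fragment, so
    # it can be checked without hitting imdb.com (assumes BeautifulSoup comes
    # from bs4, as in the corrected import above).
    _html = (
        """<table><tr>"""
        """<td class="titleColumn"><a>The Shawshank Redemption</a></td>"""
        """<td class="ratingColumn imdbRating"><strong>9.2</strong></td>"""
        """</tr></table>"""
    )
    _soup = BeautifulSoup(_html, """html.parser""")
    _titles = _soup.find_all("""td""", attrs="""titleColumn""")
    _ratings = _soup.find_all("""td""", class_="""ratingColumn imdbRating""")
    assert {t.a.text: float(r.strong.text) for t, r in zip(_titles, _ratings)} == {"The Shawshank Redemption": 9.2}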
| 368 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints ( path_to_checkpoints , dump_path ):
'''simple docstring'''
UpperCAmelCase__ = BertAbsConfig(
temp_dir=""".""" , finetune_bert=SCREAMING_SNAKE_CASE__ , large=SCREAMING_SNAKE_CASE__ , share_emb=SCREAMING_SNAKE_CASE__ , use_bert_emb=SCREAMING_SNAKE_CASE__ , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ , lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : storage )
UpperCAmelCase__ = AbsSummarizer(SCREAMING_SNAKE_CASE__ , torch.device("""cpu""" ) , SCREAMING_SNAKE_CASE__ )
original.eval()
UpperCAmelCase__ = BertAbsSummarizer(SCREAMING_SNAKE_CASE__ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
UpperCAmelCase__ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
UpperCAmelCase__ = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(SCREAMING_SNAKE_CASE__ )) )
UpperCAmelCase__ = torch.tensor(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
UpperCAmelCase__ = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(SCREAMING_SNAKE_CASE__ )) )
UpperCAmelCase__ = torch.tensor(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase__ = encoder_input_ids
UpperCAmelCase__ = decoder_input_ids
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = UpperCAmelCase__ = None
UpperCAmelCase__ = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase__ = original(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
UpperCAmelCase__ = original.generator(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = new_model(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[0]
UpperCAmelCase__ = new_model.generator(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(SCREAMING_SNAKE_CASE__ ) )
UpperCAmelCase__ = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 61 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
def __lowercase ( self : int ):
'''simple docstring'''
super().setUp()
        tokenizer = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def canine_tokenizer( self ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def __lowercase ( self : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
UpperCamelCase__ : Dict = 10_24
return tokenizer
@require_torch
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.canine_tokenizer
UpperCamelCase__ : str = ["Life is like a box of chocolates.", "You never know what you\'re gonna get."]
# fmt: off
UpperCamelCase__ : str = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
UpperCamelCase__ : str = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase__ : List[str] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.canine_tokenizer
UpperCamelCase__ : List[str] = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCamelCase__ : Optional[Any] = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCamelCase )
self.assertIn("attention_mask" , _lowerCamelCase )
self.assertIn("token_type_ids" , _lowerCamelCase )
@require_torch
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : Dict = self.canine_tokenizer
UpperCamelCase__ : Dict = [
"What\'s the weater?",
"It\'s about 25 degrees.",
]
UpperCamelCase__ : str = tokenizer(
text_target=_lowerCamelCase , max_length=32 , padding="max_length" , truncation=_lowerCamelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCamelCase__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase__ : str = tempfile.mkdtemp()
UpperCamelCase__ : int = " He is very happy, UNwant\u00E9d,running"
UpperCamelCase__ : List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
UpperCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(_lowerCamelCase )
UpperCamelCase__ : Optional[Any] = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase__ : List[Any] = tempfile.mkdtemp()
UpperCamelCase__ : Optional[int] = " He is very happy, UNwant\u00E9d,running"
UpperCamelCase__ : Union[str, Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCamelCase__ : Dict = chr(0Xe0_07 )
additional_special_tokens.append(_lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCamelCase__ : Optional[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
UpperCamelCase__ : Dict = tokenizer.__class__.from_pretrained(_lowerCamelCase )
UpperCamelCase__ : List[Any] = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertIn(_lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCamelCase__ : List[str] = tokenizer.__class__.from_pretrained(_lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCamelCase )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
                UpperCamelCase__ , UpperCamelCase__ = self.get_clean_sequence(_lowerCamelCase )
# a special token for Canine can be defined as follows:
UpperCamelCase__ : Optional[int] = 0Xe0_05
UpperCamelCase__ : str = chr(_lowerCamelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCamelCase__ : Tuple = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
UpperCamelCase__ : str = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCamelCase )
UpperCamelCase__ : Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
UpperCamelCase__ : Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
UpperCamelCase__ : List[str] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , input_encoded + special_token_id )
UpperCamelCase__ : str = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCamelCase__ : Tuple = chr(0Xe0_05 )
UpperCamelCase__ : int = chr(0Xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCamelCase__ : Optional[Any] = tokenizer.tokenize(_lowerCamelCase )
UpperCamelCase__ : List[str] = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCamelCase )
self.assertEqual(token_a[0] , _lowerCamelCase )
@require_tokenizers
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCamelCase__ : int = 0Xe0_06
UpperCamelCase__ : List[str] = chr(_lowerCamelCase )
UpperCamelCase__ : Dict = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCamelCase )
tokenizer.from_pretrained(_lowerCamelCase )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCamelCase__ : int = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCamelCase__ : Optional[Any] = json.load(_lowerCamelCase )
# a special token for Canine can be defined as follows:
UpperCamelCase__ : Dict = 0Xe0_06
UpperCamelCase__ : List[Any] = chr(_lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = [new_token_a]
UpperCamelCase__ : List[str] = [new_token_a]
with open(os.path.join(_lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
with open(os.path.join(_lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCamelCase__ : Tuple = tokenizer_class.from_pretrained(_lowerCamelCase , extra_ids=0 )
self.assertIn(_lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCamelCase__ : Any = 0Xe0_07
UpperCamelCase__ : Optional[int] = chr(_lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCamelCase__ : List[str] = [AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase )]
UpperCamelCase__ : Optional[Any] = tokenizer_class.from_pretrained(
_lowerCamelCase , additional_special_tokens=_lowerCamelCase , extra_ids=0 )
self.assertIn(_lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCamelCase__ : List[str] = "hello world"
if self.space_between_special_tokens:
UpperCamelCase__ : str = "[CLS] hello world [SEP]"
else:
UpperCamelCase__ : List[str] = input
UpperCamelCase__ : Union[str, Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
UpperCamelCase__ : Dict = tokenizer.decode(_lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCamelCase , [output, output.lower()] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCamelCase__ : Union[str, Any] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCamelCase__ : str = "a"
UpperCamelCase__ : Union[str, Any] = ord(_lowerCamelCase )
for attr in attributes_list:
setattr(_lowerCamelCase , attr + "_id" , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , attr + "_id" ) , _lowerCamelCase )
setattr(_lowerCamelCase , attr + "_id" , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , attr + "_id" ) , _lowerCamelCase )
setattr(_lowerCamelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCamelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCamelCase , "additional_special_tokens_ids" ) , [] )
UpperCamelCase__ : List[str] = 0Xe0_06
UpperCamelCase__ : Dict = chr(_lowerCamelCase )
setattr(_lowerCamelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCamelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCamelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass | 189 |
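# What the batch test above encodes, shown concretely; a hedged sketch that
# downloads google/canine-s. Canine assigns one id per Unicode code point,
# with [CLS] = 0xE000 = 57344 and [SEP] = 0xE001 = 57345, matching the
# expected ids in the padding test above.
from transformers import CanineTokenizer

tok = CanineTokenizer.from_pretrained("google/canine-s")
print(tok("hi")["input_ids"])  # expected: [57344, 104, 105, 57345]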
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def UpperCamelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self ):
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
lowercase = 'A painting of a squirrel eating a burger'
lowercase = jax.device_count()
lowercase = num_samples * [prompt]
lowercase = sd_pipe.prepare_inputs(_lowerCamelCase )
lowercase = replicate(_lowerCamelCase )
lowercase = shard(_lowerCamelCase )
lowercase = jax.random.PRNGKey(0 )
lowercase = jax.random.split(_lowerCamelCase , jax.device_count() )
lowercase = sd_pipe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_inference_steps=2_5 , jit=_lowerCamelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder='scheduler')
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision='bf16', dtype=jnp.bfloat16,
        )
        params['scheduler'] = scheduler_params
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 220 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@unittest.skip("Esm does not support embedding resizing")
def lowercase_ ( self : str)-> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("Esm does not support embedding resizing")
def lowercase_ ( self : Optional[int])-> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def lowercase_ ( self : int)-> Tuple:
'''simple docstring'''
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
@slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 108 |
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
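    # Illustrative extension (not part of the original snippet): an n-input AND
    # can be built by folding the 2-input gate over a list of bits.
    from functools import reduce
    print(reduce(and_gate, [1, 1, 1]))  # 1
    print(reduce(and_gate, [1, 0, 1]))  # 0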
| 108 | 1 |
def multiplication_table(number: int, number_of_terms: int) -> str:
"""simple docstring"""
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
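    # Quick sanity check (illustrative, not part of the original snippet):
    # the first two terms of the 3-times table.
    assert multiplication_table(3, 2) == "3 * 1 = 3\n3 * 2 = 6"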
| 279 |
def multiplication_table(number: int, number_of_terms: int) -> str:
"""simple docstring"""
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
| 279 | 1 |
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
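    # Non-interactive sanity check (illustrative, not part of the original snippet).
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]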
| 280 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {'shortest_edge': 30}
        crop_size = crop_size if crop_size is not None else {'height': 30, 'width': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize_and_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'crop_pct'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 30})
        self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
| 280 | 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'a': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'a': 2, 'b': True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {'a': 2, 'c': 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Check the grad scaler kwargs are passed through to the underlying scaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__UpperCAmelCase = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__UpperCAmelCase = Accelerator(kwargs_handlers=[ddp_scaler])
__UpperCAmelCase = torch.nn.Linear(100, 200)
__UpperCAmelCase = accelerator.prepare(model)
# Check the values changed in kwargs
__UpperCAmelCase = ''
__UpperCAmelCase = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 29 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "mra"
def __init__( self : str , _A : List[str]=5_0265 , _A : int=768 , _A : Union[str, Any]=12 , _A : Union[str, Any]=12 , _A : Union[str, Any]=3072 , _A : Any="gelu" , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=512 , _A : Tuple=1 , _A : List[str]=0.02 , _A : Union[str, Any]=1e-5 , _A : Optional[int]="absolute" , _A : Union[str, Any]=4 , _A : List[Any]="full" , _A : Union[str, Any]=0 , _A : Union[str, Any]=0 , _A : Optional[Any]=1 , _A : Union[str, Any]=0 , _A : Any=2 , **_A : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
__SCREAMING_SNAKE_CASE : Dict = vocab_size
__SCREAMING_SNAKE_CASE : str = max_position_embeddings
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE : str = num_hidden_layers
__SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
__SCREAMING_SNAKE_CASE : str = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = initializer_range
__SCREAMING_SNAKE_CASE : Any = type_vocab_size
__SCREAMING_SNAKE_CASE : str = layer_norm_eps
__SCREAMING_SNAKE_CASE : Union[str, Any] = position_embedding_type
__SCREAMING_SNAKE_CASE : str = block_per_row
__SCREAMING_SNAKE_CASE : Union[str, Any] = approx_mode
__SCREAMING_SNAKE_CASE : Optional[int] = initial_prior_first_n_blocks
__SCREAMING_SNAKE_CASE : List[Any] = initial_prior_diagonal_n_blocks
| 303 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(self, predictions, references, char_order=CHRF.CHAR_ORDER, word_order=CHRF.WORD_ORDER, beta=CHRF.BETA, lowercase=False, whitespace=False, eps_smoothing=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 262 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("""aer_simulator""")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"Total count for various states are: {single_qubit_measure(1, 1)}")
| 262 | 1 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
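    # The classic amicable pair as a worked example (illustrative, not part of
    # the original snippet): d(220) = 284 and d(284) = 220.
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220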
| 280 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(f'{solution() = }')
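    # From the Project Euler 43 statement (illustrative, not part of the
    # original snippet): 1406357289 is a 0-9 pandigital number with the
    # substring divisibility property.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))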
| 117 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 364 |
'''simple docstring'''
lowerCamelCase_ = 'Tobias Carryer'
from time import time
class LinearCongruentialGenerator:
    """simple docstring"""
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
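# Deterministic sanity check (illustrative, not part of the original snippet):
# with seed 0 the first output is simply the increment modulo 2**32.
_demo_lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
assert _demo_lcg.next_number() == 1013904223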
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
| 111 | 0 |
'''simple docstring'''
def catalan(number: int) -> int:
    if not isinstance(number, int):
        error_msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(error_msg)
    if number < 1:
        error_msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(error_msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
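    # The first Catalan numbers as a worked example (illustrative, not part of
    # the original snippet) under this 1-indexed convention.
    assert [catalan(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]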
| 41 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set the attributes of a patched module on a proxy object."""
    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__'):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, usable as a context manager."""
    _active_patches = []
    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split('.')
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.')
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))
    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)
    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 185 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
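    # Illustrative run (not part of the original snippet): dy/dx = -2*x*y**2
    # with y(0) = 1 has the exact solution y = 1/(1 + x**2), so the value at
    # x = 1.0 should be close to 0.5.
    ys = euler_modified(lambda x, y: -2.0 * x * y**2, 1.0, 0.0, 0.2, 1.0)
    print(ys[-1])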
| 347 |
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
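    # Illustrative checks (not part of the original snippet).
    assert is_palindrome(121)
    assert not is_palindrome(123)
    assert not is_palindrome(-121)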
| 347 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('sample_data.csv', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='mean_squared_error', optimizer='adam')
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    predictions = model.predict(x_test)
| 61 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
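    # Small worked instance (illustrative, not part of the original snippet):
    # items with (weight, value) = (1, 6), (2, 10), (3, 12) and capacity 5;
    # taking the last two items gives the best value, 22.
    assert knapsack([1, 2, 3], [6, 10, 12], 3, 5, 0) == 22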
| 268 | 0 |
'''simple docstring'''
def solution(n: int = 2000000) -> int:
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
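# Sanity check (illustrative, not part of the original snippet): the primes
# below 10 are 2, 3, 5 and 7, so solution(10) == 17.
assert solution(10) == 17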
if __name__ == "__main__":
print(f"""{solution() = }""") | 364 |
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start: str, visited: list, sort: list) -> list:
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
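# For the graph above the traversal emits children before parents
# (illustrative check, not part of the original snippet):
assert topological_sort("a", [], []) == ["c", "d", "e", "b", "a"]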
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 190 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
    lr_scheduler: Optional[str] = field(
        default='linear', metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'}, )
| 256 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
def __init__( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=13 , __UpperCamelCase : str=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Dict=32 , __UpperCamelCase : int=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : List[str]=16 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=0.0_2 , __UpperCamelCase : str=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple="None" , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Any=None , ) -> Tuple:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase = self.get_config()
_UpperCamelCase = 300
return config
def _UpperCamelCase ( self : int , __UpperCamelCase : List[Any] ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] ) -> List[str]:
_UpperCamelCase = DebertaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[int] ) -> Tuple:
_UpperCamelCase = DebertaForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> List[Any]:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCamelCase )
def create_and_check_deberta_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
config.num_labels = self.num_labels
model = DebertaForTokenClassification(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_deberta_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
model = DebertaForQuestionAnswering(config=config )
model.to(torch_device )
model.eval()
result = model(
input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common(self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_torchscript = False
test_pruning = False
test_head_masking = False
is_encoder_decoder = False
def setUp(self ):
self.model_tester = DebertaModelTester(self )
self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
def test_config(self ):
self.config_tester.run_common_tests()
def test_deberta_model(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*config_and_inputs )
def test_for_sequence_classification(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
def test_for_masked_lm(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
def test_for_question_answering(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
def test_for_token_classification(self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
@slow
def test_model_from_pretrained(self ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = DebertaModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
def test_inference_masked_lm(self ):
pass
@slow
def test_inference_no_head(self ):
model = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
output = model(input_ids , attention_mask=attention_mask )[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
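# Illustration (not part of the original test file): the tester above calls a
# `prepare_config_and_inputs` helper whose body falls outside this excerpt. A
# minimal, hedged sketch of the usual shape of that helper -- names and sizes
# are assumptions, not the original code:
import torch

def ids_tensor(shape, vocab_size):
    # Random token ids in [0, vocab_size), as the transformers test utils do.
    return torch.randint(low=0, high=vocab_size, size=shape, dtype=torch.long)

def prepare_config_and_inputs_sketch(tester):
    input_ids = ids_tensor([tester.batch_size, tester.seq_length], tester.vocab_size)
    input_mask = torch.ones(tester.batch_size, tester.seq_length, dtype=torch.long)
    token_type_ids = ids_tensor([tester.batch_size, tester.seq_length], tester.type_vocab_size)
    sequence_labels = ids_tensor([tester.batch_size], tester.type_sequence_label_size)
    token_labels = ids_tensor([tester.batch_size, tester.seq_length], tester.num_labels)
    choice_labels = ids_tensor([tester.batch_size], tester.num_choices)
    config = tester.get_config()
    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels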
| 256 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name , num_meta4D_last_stage ) -> str:
new_name = old_name
if "patch_embed" in old_name:
_, layer, param = old_name.split("." )
if layer == "0":
new_name = old_name.replace("0" , "convolution1" )
elif layer == "1":
new_name = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
new_name = old_name.replace("3" , "convolution2" )
else:
new_name = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" , old_name ):
two_digit_num = R"\b\d{2}\b"
if bool(re.search(two_digit_num , old_name ) ):
match = re.search(R"\d\.\d\d." , old_name ).group()
else:
match = re.search(R"\d\.\d." , old_name ).group()
if int(match[0] ) < 6:
trimmed_name = old_name.replace(match , "" )
trimmed_name = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
new_name = "intermediate_stages." + trimmed_name
else:
trimmed_name = old_name.replace(match , "" )
if int(match[2] ) < num_meta4D_last_stage:
trimmed_name = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
layer_index = str(int(match[2] ) - num_meta4D_last_stage )
trimmed_name = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
trimmed_name = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
trimmed_name = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
trimmed_name = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
trimmed_name = trimmed_name.replace("fc2" , "linear_out" )
new_name = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." , old_name ):
new_name = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
new_name = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
new_name = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
new_name = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
new_name = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
new_name = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
new_name = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
new_name = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
new_name = new_name.replace("norm" , "layernorm" )
new_name = "efficientformer." + new_name
else:
new_name = "efficientformer.encoder." + new_name
return new_name
def convert_torch_checkpoint(checkpoint , num_meta4D_last_stage ):
for key in checkpoint.copy().keys():
val = checkpoint.pop(key )
checkpoint[rename_key(key , num_meta4D_last_stage )] = val
return checkpoint
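# Illustration (not part of the original script): key-by-key renames like
# convert_torch_checkpoint can silently drop or collide keys. A small sketch of
# a post-hoc sanity check (the helper name is hypothetical):
def assert_keys_preserved(old_keys, new_keys):
    # Every source key must map to exactly one distinct target key.
    assert len(new_keys) == len(old_keys), "some keys were dropped during renaming"
    assert len(set(new_keys)) == len(new_keys), "two source keys collided on one target key"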
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url , stream=True ).raw )
return image
def convert_efficientformer_checkpoint(checkpoint_path: Path , efficientformer_config_file: Path , pytorch_dump_path: Path , push_to_hub: bool ):
orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
model = EfficientFormerForImageClassificationWithTeacher(config )
model_name = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
model.load_state_dict(new_state_dict )
model.eval()
pillow_resamplings = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
image = prepare_img()
image_size = 256
crop_size = 224
processor = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
pixel_values = processor(images=image , return_tensors="pt" ).pixel_values
# original processing pipeline
image_transforms = Compose(
[
Resize(image_size , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(crop_size ),
ToTensor(),
Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
] )
original_pixel_values = image_transforms(image ).unsqueeze(0 )
assert torch.allclose(original_pixel_values , pixel_values )
outputs = model(pixel_values )
logits = outputs.logits
expected_shape = (1, 1000)
if "l1" in model_name:
expected_logits = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , expected_logits , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
expected_logits = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , expected_logits , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
expected_logits = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(pytorch_dump_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_path )
print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(pytorch_dump_path )
print(f"""Processor successfully saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=True , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
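# Note (not part of the original script): the paired --push_to_hub /
# --no-push_to_hub flags above can be declared in one line on Python 3.9+ with
# argparse.BooleanOptionalAction, which auto-generates the negated form:
#
# parser.add_argument(
#     "--push_to_hub",
#     action=argparse.BooleanOptionalAction,
#     default=True,
#     help="Push model and image processor to the hub",
# )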
| 111 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_ctrl'] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_ctrl'] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
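# Illustration (not part of the original module): _LazyModule defers the heavy
# torch/tf imports until an attribute is first accessed. A minimal sketch of
# the same idea with module-level __getattr__ (PEP 562), independent of the
# transformers helper:
#
# import importlib
#
# def __getattr__(name):
#     for submodule, attrs in _import_structure.items():
#         if name in attrs or name == submodule:
#             module = importlib.import_module("." + submodule, __name__)
#             return module if name == submodule else getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")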
| 111 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCamelCase ( self : Tuple ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image(self ):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
def dummy_cond_unet_upscale(self ):
torch.manual_seed(0 )
model = UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def dummy_vae(self ):
torch.manual_seed(0 )
model = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def dummy_text_encoder(self ):
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(text_encoder_config )
def _UpperCamelCase ( self : Dict ) -> List[Any]:
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
low_res_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
_UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase = '''A painting of a squirrel eating a burger'''
_UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = output.images
_UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase__ , )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCamelCase = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self : Optional[Any] ) -> int:
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
low_res_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
_UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase = '''A painting of a squirrel eating a burger'''
_UpperCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
_UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _UpperCamelCase ( self : Optional[Any] ) -> List[str]:
_UpperCamelCase = self.dummy_cond_unet_upscale
_UpperCamelCase = DDPMScheduler()
_UpperCamelCase = DDIMScheduler(prediction_type='''v_prediction''' )
_UpperCamelCase = self.dummy_vae
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
low_res_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_UpperCamelCase = unet.half()
_UpperCamelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCamelCase = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
_UpperCamelCase = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_UpperCamelCase = '''A painting of a squirrel eating a burger'''
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , ).images
_UpperCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCamelCase ( self : List[str] ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
_UpperCamelCase = '''a cat sitting on a park bench'''
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''np''' , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _UpperCamelCase ( self : List[Any] ) -> List[str]:
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
pipe = StableDiffusionUpscalePipeline.from_pretrained(
model_id , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
_UpperCamelCase = '''a cat sitting on a park bench'''
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''np''' , )
_UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _UpperCamelCase ( self : List[str] ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
pipe = StableDiffusionUpscalePipeline.from_pretrained(
model_id , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = '''a cat sitting on a park bench'''
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
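# Illustration (not part of the original tests): each test above rebuilds a
# seeded generator before calling the pipeline so that two runs are bitwise
# comparable. The pattern in isolation, as a hedged standalone helper:
#
# import torch
#
# def seeded_generator(device, seed=0):
#     # MPS has no device-local Generator, so fall back to the global RNG there.
#     if str(device).startswith("mps"):
#         return torch.manual_seed(seed)
#     return torch.Generator(device=device).manual_seed(seed)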
| 256 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = ShapEPipeline
params = ["prompt"]
batch_params = ["prompt"]
required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
test_gradient_checkpointing = False
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return 8
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(config )
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
A_ = PriorTransformer(**UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
A_ = ShapERenderer(**UpperCamelCase__ )
return model
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_renderer
A_ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase__ , clip_sample=UpperCamelCase__ , clip_sample_range=1.0 , )
A_ = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> List[str]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.images[0]
A_ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
A_ = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = 1
A_ = 2
A_ = self.get_dummy_inputs(UpperCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
A_ = batch_size * [inputs[key]]
A_ = pipe(**UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
A_ = ShapEPipeline.from_pretrained("""openai/shap-e""" )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A_ = pipe(
"""a shark""" , generator=UpperCamelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
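# Illustration (not part of the original tests): the num_images_per_prompt test
# above expands every batchable input by hand. The same expansion written as a
# standalone helper:
def expand_batch(inputs, batch_params, batch_size):
    # Repeat each batchable entry so the pipeline sees `batch_size` copies of it.
    return {k: (batch_size * [v] if k in batch_params else v) for k, v in inputs.items()}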
| 162 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = ShapEImg2ImgPipeline
params = ["""image"""]
batch_params = ["""image"""]
required_optional_params = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
test_gradient_checkpointing = False
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Tuple ):
return 32
@property
def _A ( self : List[str] ):
return self.time_input_dim * 4
@property
def _A ( self : Optional[Any] ):
return 8
@property
def _A ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase :Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
UpperCamelCase :List[Any] = CLIPVisionModel(__lowerCamelCase )
return model
@property
def _A ( self : List[str] ):
UpperCamelCase :Optional[int] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def _A ( self : List[Any] ):
torch.manual_seed(0 )
UpperCamelCase :Optional[int] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase :int = PriorTransformer(**__lowerCamelCase )
return model
@property
def _A ( self : List[str] ):
torch.manual_seed(0 )
UpperCamelCase :List[str] = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase :Tuple = ShapERenderer(**__lowerCamelCase )
return model
def _A ( self : Dict ):
UpperCamelCase :Tuple = self.dummy_prior
UpperCamelCase :Dict = self.dummy_image_encoder
UpperCamelCase :List[str] = self.dummy_image_processor
UpperCamelCase :List[Any] = self.dummy_renderer
UpperCamelCase :Dict = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , )
UpperCamelCase :Tuple = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _A ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str]=0 ):
UpperCamelCase :Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :Optional[int] = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Tuple = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _A ( self : Tuple ):
UpperCamelCase :Dict = """cpu"""
UpperCamelCase :List[str] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :Optional[int] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
UpperCamelCase :Any = output.images[0]
UpperCamelCase :str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase :List[str] = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : Union[str, Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A ( self : Optional[int] ):
UpperCamelCase :Union[str, Any] = torch_device == """cpu"""
UpperCamelCase :Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , )
def _A ( self : Any ):
UpperCamelCase :int = self.get_dummy_components()
UpperCamelCase :List[str] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :Optional[Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = 1
UpperCamelCase :int = 2
UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase :Tuple = batch_size * [inputs[key]]
UpperCamelCase :Tuple = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : str ):
UpperCamelCase :Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
UpperCamelCase :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
pipe = ShapEImg2ImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
pipe = pipe.to(torch_device )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase :Tuple = pipe(
__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
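# Note (not part of the original tests): the image_mean/image_std literals
# handed to CLIPImageProcessor above are the standard CLIP statistics; the
# processor ultimately applies a per-channel (pixel - mean) / std. Written out:
import torch

CLIP_MEAN = torch.tensor([0.48145466, 0.4578275, 0.40821073]).view(3, 1, 1)
CLIP_STD = torch.tensor([0.26862954, 0.26130258, 0.27577711]).view(3, 1, 1)

def clip_normalize(pixels):
    # pixels: float tensor in [0, 1] with shape (3, H, W)
    return (pixels - CLIP_MEAN) / CLIP_STD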
| 362 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
pipeline_class = StableDiffusionControlNetImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self ):
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
controlnet = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
components = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs(self , device , seed=0 ):
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
controlnet_embedder_scale_factor = 2
control_image = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def _A ( self : Dict ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _A ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _A ( self : Optional[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
pipeline_class = StableDiffusionControlNetImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def get_dummy_components(self ):
torch.manual_seed(0 )
unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(m ):
if isinstance(m , torch.nn.Conv2d ):
torch.nn.init.normal_(m.weight )
m.bias.data.fill_(1.0 )
controlnet1 = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlnet1.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
controlnet2 = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlnet2.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
controlnet = MultiControlNetModel([controlnet1, controlnet2] )
components = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs(self , device , seed=0 ):
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
controlnet_embedder_scale_factor = 2
control_image = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
]
image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def test_control_guidance_switch(self ):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe.to(torch_device )
scale = 10.0
steps = 4
inputs = self.get_dummy_inputs(torch_device )
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_1 = pipe(**inputs )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
inputs = self.get_dummy_inputs(torch_device )
inputs["num_inference_steps"] = steps
inputs["controlnet_conditioning_scale"] = scale
output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
assert np.sum(np.abs(output_2 - output_3 ) ) > 1E-3
assert np.sum(np.abs(output_3 - output_4 ) ) > 1E-3
def _A ( self : Any ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _A ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def _A ( self : Dict ):
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def _A ( self : Any ):
UpperCamelCase :List[str] = self.get_dummy_components()
UpperCamelCase :List[str] = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__lowerCamelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : List[str] ):
controlnet = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=None , controlnet=controlnet )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCamelCase :Optional[int] = """evil space-punk bird"""
UpperCamelCase :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
UpperCamelCase :List[str] = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
UpperCamelCase :str = pipe(
__lowerCamelCase , __lowerCamelCase , control_image=__lowerCamelCase , generator=__lowerCamelCase , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
UpperCamelCase :int = output.images[0]
assert image.shape == (512, 512, 3)
UpperCamelCase :Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
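# Illustration (not part of the original tests): control_guidance_start/end,
# exercised in the tests above, window each ControlNet to a slice of the
# denoising schedule. A hedged sketch of the per-step scale this implies -- the
# diffusers internals differ in detail:
def controlnet_scale_at(progress, start, end, scale):
    # progress in [0, 1]; the net only contributes inside its [start, end] window.
    return scale if start <= progress <= end else 0.0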
| 62 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
data_dict = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
dataset = Dataset.from_dict(data_dict )
return dataset
class MakeDuplicateClustersTest(TestCase ):
def test_make_duplicate_clusters(self ):
'''simple docstring'''
ds = get_dataset()
duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def test_deduplicate_dataset(self ):
'''simple docstring'''
ds = get_dataset()
ds_filter, duplicate_clusters = deduplicate_dataset(ds )
self.assertEqual(len(ds_filter ) , 2 )
print(duplicate_clusters )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , True )
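# Illustration (not part of the original tests): make_duplicate_clusters is
# built on MinHash similarity. A tiny sketch of the underlying estimate using
# the datasketch library (assumed to be available):
from datasketch import MinHash

def jaccard_estimate(text_a, text_b, num_perm=256):
    m_a, m_b = MinHash(num_perm=num_perm), MinHash(num_perm=num_perm)
    for tok in text_a.split():
        m_a.update(tok.encode("utf-8"))
    for tok in text_b.split():
        m_b.update(tok.encode("utf-8"))
    return m_a.jaccard(m_b)  # approximates Jaccard similarity of the token sets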
| 109 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class Image:
@staticmethod
def open(*_a , **_a ):
pass
def hashimage(image: Image ) -> str:
'''simple docstring'''
m = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image ) -> Dict:
'''simple docstring'''
npimg = np.array(mask )
shape = npimg.shape
return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _snake_case ( unittest.TestCase ):
model_mapping = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
tf_model_mapping = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def get_test_pipeline(self , model , tokenizer , processor ):
image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def run_pipeline_test(self , mask_generator , examples ):
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self ):
image_segmenter = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs["masks"] ):
new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = "facebook/sam-vit-huge"
__magic_name__ : str = pipeline("mask-generation" , model=_a )
__magic_name__ : Tuple = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__magic_name__ : Any = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
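# --- Usage sketch (added for illustration, not part of the test suite above) ---
# Running the pipeline outside the tests looks like this; it downloads the large
# SAM checkpoint, so it is left commented out:
#
# from transformers import pipeline
# generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
# result = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
# print(len(result["masks"]), result["scores"][:3])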
| 281 | 0 |
def compute_ap(l) -> None:  # noqa: E741
    """simple docstring"""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
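# Note (added for clarity, not in the original module): for the adjacency list
# above, the articulation points printed are 2, 3 and 5; removing any of them
# disconnects the graph.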
| 362 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
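# --- Usage sketch (added for illustration; assumes a working `transformers` install) ---
# config = ConditionalDetrConfig()            # default ResNet-50 backbone, 300 object queries
# print(config.d_model, config.num_queries)   # 256 300
# onnx_config = ConditionalDetrOnnxConfig(config)
# print(dict(onnx_config.inputs))             # dynamic axes for pixel_values / pixel_mask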
| 115 | 0 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
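# Quick sanity check (added; not in the original module): all three variants agree.
# 262144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19.
assert sum_of_digits(262_144) == sum_of_digits_recursion(262_144) == sum_of_digits_compact(262_144) == 19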
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 282 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        '''simple docstring'''
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 282 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "geglu" , UpperCamelCase : Optional[int] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = True , UpperCamelCase : str = "layer_norm" , UpperCamelCase : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case : List[Any] = only_cross_attention
_snake_case : str = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
_snake_case : str = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_snake_case : List[Any] = AdaLayerNorm(UpperCamelCase , UpperCamelCase )
elif self.use_ada_layer_norm_zero:
_snake_case : Tuple = AdaLayerNormZero(UpperCamelCase , UpperCamelCase )
else:
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase , elementwise_affine=UpperCamelCase )
_snake_case : Any = Attention(
query_dim=UpperCamelCase , heads=UpperCamelCase , dim_head=UpperCamelCase , dropout=UpperCamelCase , bias=UpperCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_snake_case : Dict = (
AdaLayerNorm(UpperCamelCase , UpperCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase , elementwise_affine=UpperCamelCase )
)
_snake_case : str = Attention(
query_dim=UpperCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase , dim_head=UpperCamelCase , dropout=UpperCamelCase , bias=UpperCamelCase , upcast_attention=UpperCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
_snake_case : Dict = None
_snake_case : List[str] = None
# 3. Feed-forward
_snake_case : Any = nn.LayerNorm(UpperCamelCase , elementwise_affine=UpperCamelCase )
_snake_case : Any = FeedForward(UpperCamelCase , dropout=UpperCamelCase , activation_fn=UpperCamelCase , final_dropout=UpperCamelCase )
# let chunk size default to None
_snake_case : str = None
_snake_case : List[str] = 0
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : int ):
'''simple docstring'''
_snake_case : Optional[int] = chunk_size
_snake_case : str = dim
def UpperCamelCase_ ( self : str , UpperCamelCase : torch.FloatTensor , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[torch.LongTensor] = None , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[torch.LongTensor] = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
_snake_case : Tuple = self.norma(UpperCamelCase , UpperCamelCase )
elif self.use_ada_layer_norm_zero:
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = self.norma(
UpperCamelCase , UpperCamelCase , UpperCamelCase , hidden_dtype=hidden_states.dtype )
else:
_snake_case : List[Any] = self.norma(UpperCamelCase )
_snake_case : Any = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_snake_case : Optional[int] = self.attna(
UpperCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase , **UpperCamelCase , )
if self.use_ada_layer_norm_zero:
_snake_case : Optional[Any] = gate_msa.unsqueeze(1 ) * attn_output
_snake_case : Union[str, Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_snake_case : int = (
self.norma(UpperCamelCase , UpperCamelCase ) if self.use_ada_layer_norm else self.norma(UpperCamelCase )
)
_snake_case : List[str] = self.attna(
UpperCamelCase , encoder_hidden_states=UpperCamelCase , attention_mask=UpperCamelCase , **UpperCamelCase , )
_snake_case : Optional[int] = attn_output + hidden_states
# 3. Feed-forward
_snake_case : Optional[int] = self.norma(UpperCamelCase )
if self.use_ada_layer_norm_zero:
_snake_case : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
_snake_case : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_snake_case : Union[str, Any] = torch.cat(
[self.ff(UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_snake_case : int = self.ff(UpperCamelCase )
if self.use_ada_layer_norm_zero:
_snake_case : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
_snake_case : Tuple = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 4 , UpperCamelCase : float = 0.0 , UpperCamelCase : str = "geglu" , UpperCamelCase : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[Any] = int(dim * mult )
_snake_case : Any = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_snake_case : Any = GELU(UpperCamelCase , UpperCamelCase )
if activation_fn == "gelu-approximate":
_snake_case : Optional[int] = GELU(UpperCamelCase , UpperCamelCase , approximate='tanh' )
elif activation_fn == "geglu":
_snake_case : str = GEGLU(UpperCamelCase , UpperCamelCase )
elif activation_fn == "geglu-approximate":
_snake_case : Tuple = ApproximateGELU(UpperCamelCase , UpperCamelCase )
_snake_case : int = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase ) )
# project out
self.net.append(nn.Linear(UpperCamelCase , UpperCamelCase ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase ) )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
for module in self.net:
_snake_case : Any = module(UpperCamelCase )
return hidden_states
class GELU(nn.Module):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : str = "none" ):
'''simple docstring'''
super().__init__()
_snake_case : List[Any] = nn.Linear(UpperCamelCase , UpperCamelCase )
_snake_case : Dict = approximate
def UpperCamelCase_ ( self : int , UpperCamelCase : int ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(UpperCamelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Optional[int] = self.proj(UpperCamelCase )
_snake_case : Dict = self.gelu(UpperCamelCase )
return hidden_states
class GEGLU(nn.Module):
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
super().__init__()
_snake_case : Any = nn.Linear(UpperCamelCase , dim_out * 2 )
def UpperCamelCase_ ( self : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(UpperCamelCase )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def UpperCamelCase_ ( self : Any , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case : Tuple = self.proj(UpperCamelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase )
class ApproximateGELU(nn.Module):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase : int , UpperCamelCase : int ):
'''simple docstring'''
super().__init__()
_snake_case : Dict = nn.Linear(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : str , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : List[Any] = self.proj(UpperCamelCase )
return x * torch.sigmoid(1.7_02 * x )
class AdaLayerNorm(nn.Module):
'''simple docstring'''
def __init__( self : int , UpperCamelCase : Any , UpperCamelCase : int ):
'''simple docstring'''
super().__init__()
_snake_case : int = nn.Embedding(UpperCamelCase , UpperCamelCase )
_snake_case : List[Any] = nn.SiLU()
_snake_case : Union[str, Any] = nn.Linear(UpperCamelCase , embedding_dim * 2 )
_snake_case : Union[str, Any] = nn.LayerNorm(UpperCamelCase , elementwise_affine=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.linear(self.silu(self.emb(UpperCamelCase ) ) )
_snake_case , _snake_case : Optional[Any] = torch.chunk(UpperCamelCase , 2 )
_snake_case : Optional[Any] = self.norm(UpperCamelCase ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__()
_snake_case : Tuple = CombinedTimestepLabelEmbeddings(UpperCamelCase , UpperCamelCase )
_snake_case : int = nn.SiLU()
_snake_case : Optional[int] = nn.Linear(UpperCamelCase , 6 * embedding_dim , bias=UpperCamelCase )
_snake_case : Optional[int] = nn.LayerNorm(UpperCamelCase , elementwise_affine=UpperCamelCase , eps=1e-6 )
def UpperCamelCase_ ( self : Any , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict=None ):
'''simple docstring'''
_snake_case : Optional[int] = self.linear(self.silu(self.emb(UpperCamelCase , UpperCamelCase , hidden_dtype=UpperCamelCase ) ) )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Tuple = emb.chunk(6 , dim=1 )
_snake_case : int = self.norm(UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
'''simple docstring'''
def __init__( self : str , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Optional[str] = None , UpperCamelCase : float = 1e-5 ):
'''simple docstring'''
super().__init__()
_snake_case : Dict = num_groups
_snake_case : Optional[Any] = eps
if act_fn is None:
_snake_case : Union[str, Any] = None
else:
_snake_case : Optional[int] = get_activation(UpperCamelCase )
_snake_case : List[str] = nn.Linear(UpperCamelCase , out_dim * 2 )
def UpperCamelCase_ ( self : int , UpperCamelCase : int , UpperCamelCase : str ):
'''simple docstring'''
if self.act:
_snake_case : Any = self.act(UpperCamelCase )
_snake_case : Union[str, Any] = self.linear(UpperCamelCase )
_snake_case : Dict = emb[:, :, None, None]
_snake_case , _snake_case : Any = emb.chunk(2 , dim=1 )
_snake_case : List[str] = F.group_norm(UpperCamelCase , self.num_groups , eps=self.eps )
_snake_case : Dict = x * (1 + scale) + shift
return x
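# --- Usage sketch (added for illustration) ---
# The classes above follow diffusers' attention.py; their method bodies are still
# partly abbreviated here, so this sketch assumes the canonical definitions:
#
# import torch
# ff = FeedForward(dim=64, dropout=0.0, activation_fn="geglu")
# out = ff(torch.randn(2, 16, 64))   # shape preserved: torch.Size([2, 16, 64])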
| 260 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 260 | 1 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    '''simple docstring'''

    def __init__(self, path="", prefix="train") -> None:
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """simple docstring"""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    """simple docstring"""
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def truncate_or_pad(sequence, block_size, pad_token_id):
    """simple docstring"""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """simple docstring"""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """simple docstring"""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """simple docstring"""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
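# --- Tiny demo (added for illustration; toy ids, not from the original file) ---
if __name__ == "__main__":
    toy = torch.tensor([[101, 7, 8, 102, 9, 102, 0, 0]])
    print(build_mask(toy[0], pad_token_id=0))  # tensor([1, 1, 1, 1, 1, 1, 0, 0])
    print(compute_token_type_ids(toy.tolist(), separator_token_id=102))  # segment flips after each 102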
| 37 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
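# --- End-to-end sketch (added for illustration; downloads weights from the Hub) ---
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
# inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
# hidden = XLMRobertaModel.from_pretrained("xlm-roberta-base")(**inputs).last_hidden_state
# print(hidden.shape)  # (1, sequence_length, 768)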
| 37 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 1_50
            filename = "ade20k-id2label.json"
            expected_shape = (1, 1_50, 1_28, 1_28)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 1_28, 1_28)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 10_00
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 10_00)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.decoder_hidden_size = 2_56
    elif size == "b2":
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.decoder_hidden_size = 7_68
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.decoder_hidden_size = 7_68
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.decoder_hidden_size = 7_68
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 1_28, 3_20, 5_12]
        config.decoder_hidden_size = 7_68
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(5_12, 5_12), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
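    # --- Example invocation (added; hypothetical local paths, the .pth checkpoint
    # must be downloaded separately from the original SegFormer release) ---
    #
    #   python convert_segformer_original_to_pytorch.py \
    #       --model_name segformer.b0.512x512.ade.160k \
    #       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
    #       --pytorch_dump_folder_path ./segformer-b0-converted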
| 104 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
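# Worked example (added): 25 is 0b11001, so both implementations return 3.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == get_set_bits_count_using_modulo_operator(25) == 3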
def benchmark() -> None:
    '''simple docstring'''

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 104 | 1 |
from math import pi, sqrt
def gamma(num: float) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    '''simple docstring'''
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
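# Illustrative values (added; follow directly from the recursion above):
# gamma(5) == 24.0 (i.e. 4!) and gamma(3.5) == 2.5 * 1.5 * 0.5 * sqrt(pi), roughly 3.3234.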
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 328 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs) -> None:
        '''simple docstring'''
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self) -> Dataset:
        '''simple docstring'''
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    """simple docstring"""

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs) -> None:
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        '''simple docstring'''
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        '''simple docstring'''
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        '''simple docstring'''
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
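# --- Usage sketch (added; not part of the original module) ---
# The writer above backs `Dataset.to_sql`; a typical call looks like:
#
# from datasets import Dataset
# ds = Dataset.from_dict({"text": ["a", "b", "c"]})
# ds.to_sql("my_table", "sqlite:///demo.db", batch_size=2)  # returns number of rows written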
| 328 | 1 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_lowercase : List[str] = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        '''simple docstring'''

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_lowercase : Dict = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_lowercase : List[Any] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_lowercase : Dict = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    '''simple docstring'''

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''', id='''sequence''' ),
'''references''': datasets.Value('''string''', id='''sequence''' ),
} ), codebase_urls=['''https://github.com/jitsi/jiwer/'''], reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
], )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
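# --- Usage sketch (added; mirrors the docstring above) ---
# cer = datasets.load_metric("cer")
# cer.compute(predictions=["hello"], references=["hallo"])  # 1 substitution / 5 chars -> 0.2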
| 368 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
):
    """simple docstring"""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = jnp.floataa
@nn.compact
def __call__( self : Optional[Any], lowerCamelCase : int )-> Any:
lowerCamelCase__ : Optional[Any] =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_1''' )(lowerCamelCase )
lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
lowerCamelCase__ : Any =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_2''' )(lowerCamelCase )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = False
_a = 1
@nn.compact
def __call__( self : Any, lowerCamelCase : int )-> int:
return get_sinusoidal_embeddings(
lowerCamelCase, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift )
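
# Usage sketch (illustration only, not part of the original file): embed a batch of
# four timesteps into 32-dimensional sinusoidal features, as a diffusion UNet's
# time projection does.
if __name__ == "__main__":
    example_timesteps = jnp.array([0.0, 1.0, 10.0, 100.0])
    example_emb = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)
    print(example_emb.shape)  # (4, 32)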
| 272 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps,
        )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jnp.ndarray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
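
# Minimal usage sketch (illustration only, not part of the original file): run a few
# reverse-diffusion steps with a dummy model that predicts zeros instead of real noise.
if __name__ == "__main__":
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10, shape=(1, 3, 8, 8))
    rng = jax.random.PRNGKey(0)
    sample = jax.random.normal(rng, (1, 3, 8, 8), dtype=scheduler.dtype)
    for t in state.timesteps:
        model_output = jnp.zeros_like(sample)  # a real UNet would predict the noise here
        sample = scheduler.step(state, model_output, t, sample, key=rng).prev_sample
    print(sample.shape)  # (1, 3, 8, 8)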
| 77 | """simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
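
# Worked example (illustration, not in the original script): for the gold answer
# "the cat sat" and the prediction "the cat", the shared tokens are {"the", "cat"},
# so precision = 2/2 = 1.0, recall = 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.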
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"""Missing prediction for {qid}""")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"""{prefix}_{k}"""] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score")
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score")
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)")
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"""Histogram of no-answer probability: {name}""")
    plt.savefig(os.path.join(image_dir, f"""na_prob_hist_{name}.png"""))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
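    # Example invocation (illustration only; file names are placeholders):
    #   python evaluate_v2.py dev-v2.0.json predictions.json --out-file eval.json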
| 77 | 1 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast tests that run against a tiny ONNX Stable Diffusion checkpoint on CPU."""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Nightly integration tests requiring onnxruntime and a CUDA GPU."""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 357 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Returns integer (column, row) coordinates for every pixel, shape (width * height, 2)."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin, x=self.x, y=self.y, z=self.z, width=width, height=height,
            x_fov=self.x_fov, y_fov=self.y_fov, shape=self.shape,  # keep the original ray-bundle shape
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
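
# Usage sketch (illustration only, not part of the original file): build 20 cameras
# panning around the origin at 64x64 resolution and inspect the resulting ray bundle.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays
    print(rays.shape)  # torch.Size([1, 81920, 2, 3]): an (origin, direction) pair per pixel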
| 346 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Builds a map from TF-Slim MobileNetV1 checkpoint variable prefixes to PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Applies TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
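
# Worked example (illustration): for a 7x7 input with stride 2 and kernel 3,
# 7 % 2 != 0, so pad_along_height = max(3 - (7 % 2), 0) = 2, split as top=1/bottom=1;
# the padded 9x9 input then yields the 4x4 output that TensorFlow's "SAME" mode produces.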
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_va"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )

            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states,
        )
| 141 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_va": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetVaConfig",
        "MobileNetVaOnnxConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_va"] = ["MobileNetVaFeatureExtractor"]
    _import_structure["image_processing_mobilenet_va"] = ["MobileNetVaImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_va"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetVaForImageClassification",
        "MobileNetVaForSemanticSegmentation",
        "MobileNetVaModel",
        "MobileNetVaPreTrainedModel",
        "load_tf_weights_in_mobilenet_va",
    ]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
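
# Illustration (not part of the original file; the module path is an assumption):
# with the lazy structure above, a plain `import transformers.models.mobilenet_v2`
# is cheap, and the torch-backed modeling submodule is only imported the first time
# one of its attributes (e.g. the model class) is actually accessed.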
| 33 | 0 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of k consecutive elements, computed with a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
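
# Worked example (illustration): for array = [1, 4, 2, 10, 2] and k = 2, the window
# sums are 5, 6, 12, 12, so max_sum_in_array returns 12 in O(len(array)) time.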
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
| 360 | from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
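
# Usage sketch (illustration only; the tokenizer checkpoint and the small config
# values are arbitrary choices, not from the original file): build export-ready
# dummy inputs with past_key_values enabled.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    config = GPTJConfig(n_layer=2, n_head=4, n_embd=128)
    onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
    print(list(dummy.keys()))  # ['input_ids', 'past_key_values', 'attention_mask']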
| 105 | 0 |
from __future__ import annotations
def all_unique(nums: list[int]) -> bool:
    """
    >>> all_unique([1, 2, 3, 4])
    True
    >>> all_unique([1, 2, 1])
    False
    """
    return len(set(nums)) == len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPR context encoder tokenizer, identical to `BertTokenizerFast`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPR question encoder tokenizer, identical to `BertTokenizerFast`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
          the maximum acceptable input length for the model if that argument is not provided. This will truncate
          token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
          of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the first
          sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the
          second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
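
# Hedged usage sketch (added; not part of the original module). The reader
# tokenizer below is typically called with one question against several passages:
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What does DPR stand for?",
#       titles=["Dense Passage Retrieval", "DPR"],
#       texts=["Dense Passage Retrieval is ...", "Another passage ..."],
#       padding=True,
#       return_tensors="pt",
#   )
#   # encoded["input_ids"] has shape (n_passages, sequence_length)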
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
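
# --- Added illustration (not part of the original module) ---
# _get_best_spans above ranks every (start, end) pair by start_logit + end_logit,
# skips spans nested inside (or containing) an already-chosen span, and stops at
# top_spans. A tiny standalone replica of that selection logic:
def _demo_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


assert _demo_best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 3.0]) == [(1, 2), (0, 0)]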
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""Construct a "fast" DPR reader tokenizer, identical to `BertTokenizerFast` plus span decoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 112 | 0 |
def has_unique_chars(input_str: str) -> bool:
    """Return True when every character in ``input_str`` occurs at most once,
    tracking seen code points as bits of a single integer."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's code point
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
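    # Added usage illustration: each distinct code point flips one bit of the
    # running bitmap, so a repeated character is caught in O(1) per character.
    assert has_unique_chars("abcde") is True
    assert has_unique_chars("abcda") is False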
| 352 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
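
# Note (added): at runtime (when TYPE_CHECKING is False) the line above swaps this
# module in sys.modules for a _LazyModule, so the torch/TF-dependent submodules
# listed in _import_structure are only imported the first time one of their
# attributes is actually accessed.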
| 105 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model
    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model
    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 7 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal) -> str:
    """Convert a base-10 integer (given as an int, or a float with no fractional part) to hexadecimal."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
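    # Added usage illustration:
    assert decimal_to_hexadecimal(255) == "0xff"
    assert decimal_to_hexadecimal(-256) == "-0x100"
    # Edge case kept from the original logic: 0 skips the while loop entirely,
    # so decimal_to_hexadecimal(0) returns just "0x".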
| 71 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 366 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
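
# Note (added): with the toy vocab above, WordPiece greedily matches the longest
# vocabulary prefix, which is why "UNwantéd,running" lowercases and splits into
# "un", "##want", "##ed", ",", "runn", "##ing"; anything unmatchable becomes "[UNK]".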
| 71 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
a_ = """1"""
a_ = """0"""
a_ = """1"""
a_ = ort.SessionOptions()
a_ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
a_ = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
a_ = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
a_ = ort.RunOptions()
a_ = 128
a_ = 1
a_ = np.ones((batch, sequence), dtype=np.intaa)
a_ = np.ones((batch, sequence), dtype=np.intaa)
a_ = np.ones((batch, sequence), dtype=np.intaa)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
a_ = time.time()
a_ = 2000
a_ = {}
for iter in range(max_iters):
a_ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
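
# Note (added): the warm-up run above matters because the first sess.run() triggers
# TensorRT engine build and GPU memory allocation. For a throughput figure to go
# with the latency print, one could compute:
#   throughput = max_iters * batch / (time.time() - start_time)  # sequences/sec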
| 340 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    Each row of each alternative is laid out as:
    [start_token] + story[:cap_length] + [delimiter_token] + continuation[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
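
# Shape note (added): for a dataset of N examples, pre_process_datasets returns
# input_ids and lm_labels of shape (N, 2, input_len), mc_token_ids of shape (N, 2)
# and mc_labels of shape (N,) -- one row per (story + continuation) alternative,
# which is what OpenAIGPTDoubleHeadsModel consumes in main() below.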
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 75 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 360 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
UpperCamelCase_ = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
UpperCamelCase_ = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
UpperCamelCase_ = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ), reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'], )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(predictions, references)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 246 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self) -> None:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a list of PIL images backed by random uint8 arrays."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor)
    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='np')
        input_processor = processor(images=image_input , return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
    def test_tokenizer(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def test_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
    def test_model_input_names(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
        self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
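For orientation, a sketch of the standard end-to-end use of `CLIPProcessor` outside the test harness; the checkpoint name is a real public one, and the zero image is only a stand-in for actual input data.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # placeholder image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']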
| 339 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) -> bool:
    '''Return True if digit ``n`` can be placed at ``grid[row][column]``.'''
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix ) -> tuple[int, int] | None:
    '''Find the coordinates of the first empty (zero) cell, if any.'''
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix ) -> Matrix | None:
    '''Solve the grid in place by backtracking; return it, or None if unsolvable.'''
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0  # undo the tentative assignment and backtrack
    return None
def print_solution(grid: Matrix ) -> None:
    '''Print the grid row by row.'''
    for row in grid:
        for cell in row:
            print(cell , end=' ' )
        print()
if __name__ == "__main__":
    # sudoku() solves in place, so each example grid below is mutated by the call
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 339 | 1 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
            os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
        ]
    ]
    load(
        'MultiScaleDeformableAttention' , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
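For reference, `torch.utils.cpp_extension.load` JIT-compiles and imports a C++/CUDA extension at runtime. A minimal sketch with a hypothetical one-file extension follows; the file name `my_op.cpp` and its bound `forward` function are assumptions, not part of the snippet above.
from torch.utils.cpp_extension import load

# Hypothetical: compile "my_op.cpp" (which binds `forward` via pybind11/TORCH_LIBRARY)
# into an importable module the first time it is needed.
my_op = load(
    name="my_op",            # module name to build and import
    sources=["my_op.cpp"],   # assumed to exist next to this script
    verbose=True,            # print the ninja build log
)
# result = my_op.forward(some_tensor)  # call the bound C++ function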
| 353 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
        return
    # We iterate over each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) the same column does not already appear, because if it does
        # it means there is a collision in the vertical direction. Then we apply the
        # two formulas we learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective collision lists (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution(n: int ) -> None:
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('' )
    print(len(boards ) , 'solutions were found.' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n_queens_solution(4)
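A quick numeric check of the two diagonal identities used above: cells on the same 45º diagonal share the value row - col, and cells on the same 135º diagonal share row + col.
queens = [(1, 3), (3, 1), (0, 0), (2, 2)]
print([row - col for row, col in queens])  # [-2, 2, 0, 0]: (0, 0) and (2, 2) share a 45º diagonal
print([row + col for row, col in queens])  # [4, 4, 0, 4]: (1, 3), (3, 1) and (2, 2) share a 135º diagonal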
| 113 | 0 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__magic_name__ = TypeVar("T")
class SCREAMING_SNAKE_CASE_ ( Generic[T] ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ = True):
__SCREAMING_SNAKE_CASE = {} # dictionary of lists
__SCREAMING_SNAKE_CASE = directed
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase__)
self.adj_list[destination_vertex].append(lowerCAmelCase__)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
__SCREAMING_SNAKE_CASE = [destination_vertex]
__SCREAMING_SNAKE_CASE = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase__)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__SCREAMING_SNAKE_CASE = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__SCREAMING_SNAKE_CASE = [destination_vertex]
__SCREAMING_SNAKE_CASE = []
return self
def __repr__( self):
return pformat(self.adj_list)
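A short usage sketch of the class above (class name as rewritten here); the chaining works because add_edge returns self.
graph = GraphAdjacencyList(directed=False)
graph.add_edge(1, 2).add_edge(2, 3)
print(graph)  # {1: [2], 2: [1, 3], 3: [2]}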
| 100 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer( self ) -> None:
        tokenizer = MBartTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained( self ) -> None:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest( unittest.TestCase ):
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
    @classmethod
    def setUpClass( cls ) -> None:
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes( self ) -> None:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
    def test_enro_tokenizer_batch_encode_plus( self ) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_enro_tokenizer_decode_ignores_language_codes( self ) -> None:
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_enro_tokenizer_truncation( self ) -> None:
        src_text = ["""this is gunna be a long sentence """ * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
    def test_mask_token( self ) -> None:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
    def test_special_tokens_unaffected_by_save_load( self ) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MBartTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
    @require_torch
    def test_batch_fairseq_parity( self ) -> None:
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors="""pt""" )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch( self ) -> None:
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        batch["""decoder_input_ids"""] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length( self ) -> None:
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
        labels = targets["""input_ids"""]
        batch["""decoder_input_ids"""] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def test_tokenizer_translation( self ) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # A, test, EOS, en_XX
                """input_ids""": [[62, 3034, 2, 250004]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 250001,
            } , )
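For orientation, the standard translation-style use of the tokenizer exercised above; the checkpoint is the one named in the test, and `src_lang`, `tgt_lang` and `text_target` are real MBartTokenizer arguments.
from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(
    ["UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    return_tensors="pt",
    padding=True,
)
print(batch.input_ids.shape, batch.labels.shape)  # source ids end with [eos, en_XX]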
| 265 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = """efficientnet"""
    def __init__(self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def atol_for_validation(self ) -> float:
        return 1e-5
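A minimal sketch of how these config classes are used, assuming the class names as fixed above; on a `PretrainedConfig` subclass, constructor kwargs simply become attributes.
config = EfficientNetConfig(image_size=300, hidden_dim=1280)
print(config.model_type, config.image_size)  # efficientnet 300

onnx_config = EfficientNetOnnxConfig(config)
print(onnx_config.inputs)               # axes metadata for the "pixel_values" input
print(onnx_config.atol_for_validation)  # 1e-05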
| 366 |
import sys
def matrix_chain_order(array: list ) -> tuple:
    '''Bottom-up DP for matrix chain multiplication; returns cost and split tables.'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution: list , i: int , j: int ) -> None:
    '''Recursively print the optimal parenthesization encoded in the split table.'''
    if i == j:
        print('''A''' + str(i ) , end=''' ''' )
    else:
        print('''(''' , end=''' ''' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(''')''' , end=''' ''' )
def main() -> None:
    '''Demo on the classic six-matrix instance.'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array )
    print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
    main()
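A tiny worked instance of the recurrence cost = matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b], using the functions above: for dimensions [10, 20, 30] (a 10x20 matrix times a 20x30 matrix) the only split costs 10 * 20 * 30 = 6000 scalar multiplications.
matrix, sol = matrix_chain_order([10, 20, 30])
print(matrix[1][2])  # 6000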
| 250 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig( PretrainedConfig ):
    model_type = '''deit'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
| 32 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , 'r' , encoding='utf-8' ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index ):
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'{str(token )} \n' )
        return out_vocab_file, out_monolingual_vocab_file
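A hedged usage sketch for the tokenizer class above; the checkpoint comes from its own pretrained map, loading requires the `sentencepiece` package, and the Vietnamese sentence is just sample input.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
encoded = tokenizer("Chúng tôi là những nghiên cứu viên.")
print(encoded["input_ids"])
print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))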
| 62 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "beit"
    def __init__( self , vocab_size=8_192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
| 351 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid , init , goal , cost , heuristic , ):
    """A* search over a 0/1 occupancy grid; returns the path and the action grid."""
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("Algorithm is unable to find solution" )
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print('''ACTION MAP''')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
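The heuristic grid built in the main block above is plain Manhattan distance to the goal, with a penalty of 99 on obstacle cells; the same computation in isolation for a 5x6 grid looks like this.
goal = (4, 5)
heuristic = [[abs(i - goal[0]) + abs(j - goal[1]) for j in range(6)] for i in range(5)]
for row in heuristic:
    print(row)  # the goal cell (4, 5) has heuristic 0; values grow with distance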
| 121 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest( TestCase ):
    def _create_dummy_dataset(self ) -> Dataset:
        '''simple docstring'''
        dset = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
    def test_add_faiss_index(self ):
        '''simple docstring'''
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores, examples = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
    def test_add_faiss_index_from_external_arrays(self ):
        '''simple docstring'''
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
    def test_serialization(self ):
        '''simple docstring'''
        import faiss

        dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
a , a = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
    def test_drop_index(self ):
        '''simple docstring'''
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
        dset.drop_index("""vecs""" )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index(self ):
        '''simple docstring'''
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
            """elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            mocked_index_create.return_value = {"""acknowledged""": True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("""filename""" , es_client=es_client )
            scores, examples = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class FaissIndexTest( TestCase ):
    def test_flat_ip(self ):
        '''simple docstring'''
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores, total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
    def test_factory(self ):
        '''simple docstring'''
        import faiss

        index = FaissIndex(string_factory="""Flat""" )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory="""LSH""" )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
    def test_custom(self ):
        '''simple docstring'''
        import faiss

        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization(self ):
        '''simple docstring'''
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs ):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = """index.faiss"""
    path = f'mock://{index_name}'
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores, indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest( TestCase ):
    def test_elasticsearch(self ):
        '''simple docstring'''
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"""acknowledged""": True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(["""foo""", """bar""", """foobar"""] )
            # single query
            query = """foo"""
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            scores, indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = """foo"""
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
            scores, indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ["""foo""", """bar""", """foobar"""]
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            total_scores, total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ["""foo""", """bar""", """foobar"""]
            mocked_search.return_value = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
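The FaissIndex helper exercised above wraps the faiss library itself; the underlying add/search cycle in raw faiss (assuming `faiss-cpu` is installed) looks like this.
import faiss
import numpy as np

index = faiss.IndexFlatIP(5)            # exact inner-product index over 5-dim vectors
index.add(np.eye(5, dtype=np.float32))  # five one-hot "documents"
query = np.zeros((1, 5), dtype=np.float32)
query[0, 1] = 1.0
scores, ids = index.search(query, 1)    # top-1 neighbour per query row
print(scores[0, 0], ids[0, 0])          # 1.0 1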
| 228 |
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
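This table maps bare pip package names to version-pinned requirement strings. A sketch of the typical way such a table is consumed in setup.py follows; the helper name `deps_list` and the reduced `deps` dict are illustrative, though diffusers and transformers use a similar pattern.
deps = {
    "torch": "torch>=1.4",
    "numpy": "numpy",
}

def deps_list(*pkgs):
    """Resolve bare package names to their pinned requirement strings."""
    return [deps[pkg] for pkg in pkgs]

install_requires = deps_list("torch", "numpy")
print(install_requires)  # ['torch>=1.4', 'numpy']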
| 228 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias(state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
        # next, set the combined qkv bias in the state dict (k bias is zero)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name , eos_token_id ):
    image_size = 364 if """coco""" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = Blip2Config(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
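# Example invocation (the script filename and output path are hypothetical):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub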
| 285 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
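# Illustrative sanity check of the renaming rules above (tracing the rules in
# order for a single key):
#   rename_key("img_encoder.layers.0.blocks.0.norm1.weight")
#   # -> "vision_model.encoder.stages.0.layers.0.layer_norm1.weight"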
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """
    Copy/paste/tweak the original GroupViT weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gcc-yfcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
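# Example invocation (the script filename and checkpoint path are hypothetical):
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path ./group_vit_gcc_yfcc.pth \
#       --model_name groupvit-gcc-yfcc \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc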
| 285 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 nested list of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
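# Illustrative: floats_list((2, 3)) returns a 2x3 nested list of floats drawn
# uniformly from [0, scale), using `global_rng` unless an explicit rng is given.
#   batch = floats_list((2, 3), scale=2.0)
#   len(batch), len(batch[0])  # (2, 3)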
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float64)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
 | 189 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
 | 189 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Round a requested pixel size up to the nearest size the movq latent grid supports."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
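# Worked example: with the default scale_factor=8, a requested 768x768 image
# maps to 768 // 64 = 12 latent cells per side, so the function returns
# (96, 96) -- the latent height/width after multiplying back by 8:
#   downscale_height_and_width(768, 768)  # (96, 96)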
class KandinskyV22Pipeline(DiffusionPipeline):
    """
    Decoder pipeline for text-to-image generation with Kandinsky 2.2; the image
    embeddings come from the separate prior pipeline shown in the example above.
    """

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
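                # Classifier-free guidance: extrapolate from the unconditional
                # prediction toward the image-conditioned one; guidance_scale == 1.0
                # recovers the conditional prediction, larger values strengthen
                # the conditioning.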
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
 | 153 |
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
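# Illustrative round trip using the helpers above:
#   cipher_map = create_cipher_map("Goodbye!!")
#   encipher("Hello World!!", cipher_map)   # 'CYJJM VMQJB!!'
#   decipher("CYJJM VMQJB!!", cipher_map)   # 'HELLO WORLD!!'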
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
 | 153 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs jieba pre-tokenization followed by XLNet-style SentencePiece tokenization."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case ( self ):
"""simple docstring"""
return len(self.sp_model )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self , _snake_case ):
"""simple docstring"""
if self.remove_space:
_lowerCAmelCase = """ """.join(inputs.strip().split() )
else:
_lowerCAmelCase = inputs
_lowerCAmelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase = unicodedata.normalize("""NFKD""" , _snake_case )
_lowerCAmelCase = """""".join([c for c in outputs if not unicodedata.combining(_snake_case )] )
if self.do_lower_case:
_lowerCAmelCase = outputs.lower()
return outputs
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.preprocess_text(_snake_case )
_lowerCAmelCase = self.sp_model.encode(_snake_case , out_type=_snake_case )
_lowerCAmelCase = []
for piece in pieces:
if len(_snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase = cur_pieces[1:]
else:
_lowerCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_snake_case )
else:
new_pieces.append(_snake_case )
return new_pieces
def snake_case ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.PieceToId(_snake_case )
def snake_case ( self , _snake_case ):
"""simple docstring"""
return self.sp_model.IdToPiece(_snake_case )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = """""".join(_snake_case ).replace(_snake_case , """ """ ).strip()
return out_string
def snake_case ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self , _snake_case , _snake_case = None , _snake_case = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
if token_ids_a is not None:
return ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1, 1]
return ([0] * len(_snake_case )) + [1, 1]
def snake_case ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
def snake_case ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = super()._decode(*_snake_case , **_snake_case )
_lowerCAmelCase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
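# Minimal usage sketch (requires network access, sentencepiece and jieba; the
# checkpoint id is the one referenced in the vocab map above):
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer("你好，世界").input_ids
#   tokenizer.decode(ids)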
| 82 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
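# A minimal concrete command sketch built on the abstract base above (the
# subcommand name and behavior are made up for illustration):
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")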
| 240 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
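# Illustrative: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the
# property above evaluates to 5 * 2**6 = 320, i.e. one encoder frame per 320
# input samples (~20 ms at 16 kHz):
#   Data2VecAudioConfig().inputs_to_logits_ratio  # 320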
| 356 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
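# To run these tests locally (illustrative; adjust the test path to your checkout):
#   python -m pytest -k "ScoreSdeVe" tests/ -x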
| 339 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table):
    """Doolittle LU decomposition of a square matrix (unit diagonal on `lower`)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
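# Worked 2x2 example:
#   table = np.array([[2.0, 1.0], [4.0, 3.0]])
#   lower, upper = lower_upper_decomposition(table)
#   lower  # [[1., 0.], [2., 1.]]
#   upper  # [[2., 1.], [0., 1.]]
# and lower @ upper reproduces the input matrix.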
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 81 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
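# Typical invocations from the repository root (illustrative):
#   python utils/sort_auto_mappings.py               # rewrite the mappings in place
#   python utils/sort_auto_mappings.py --check_only  # raise if any mapping is unsorted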
| 30 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
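# Illustrative ways to populate these dataclasses via HfArgumentParser
# (the script filename is hypothetical):
#   python run_tabfact_with_tapex.py --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact --output_dir ./tapex-out --do_train --do_eval
#   python run_tabfact_with_tapex.py args.json   # exercised by the parse_json_file branch in main()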
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
snake_case : Any = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
snake_case : Any = data_args.train_file.split("." )[-1]
snake_case : Optional[int] = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
snake_case : List[Any] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
snake_case : Optional[int] = load_dataset("csv" , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
snake_case : str = load_dataset("json" , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
snake_case : Optional[Any] = raw_datasets["train"].features["label"].names
snake_case : Union[str, Any] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
snake_case : int = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__lowerCamelCase , )
snake_case : Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
snake_case : List[str] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
snake_case : Dict = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
snake_case : List[str] = {"Refused": 0, "Entailed": 1}
snake_case : int = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case : Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__lowerCamelCase : List[str] ):
# Tokenize the texts
def _convert_table_text_to_pandas(__lowerCamelCase : List[Any] ):
snake_case : Tuple = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
snake_case : Union[str, Any] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
snake_case : Optional[int] = examples["statement"]
snake_case : Union[str, Any] = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
snake_case : int = tokenizer(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase )
snake_case : str = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
snake_case : int = raw_datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
snake_case : Optional[Any] = raw_datasets["train"]
if data_args.max_train_samples is not None:
snake_case : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
snake_case : int = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
snake_case : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
snake_case : int = raw_datasets["test"]
if data_args.max_predict_samples is not None:
snake_case : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__lowerCamelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary mapping strings to floats.
def compute_metrics(__lowerCamelCase : EvalPrediction ):
snake_case : List[str] = p.predictions[0] if isinstance(p.predictions , __lowerCamelCase ) else p.predictions
snake_case : Optional[int] = np.argmax(__lowerCamelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
snake_case : int = default_data_collator
elif training_args.fpaa:
snake_case : Any = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 )
else:
snake_case : Dict = None
# Initialize our Trainer
snake_case : Optional[Any] = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCamelCase , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
snake_case : Tuple = None
if training_args.resume_from_checkpoint is not None:
snake_case : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case : str = last_checkpoint
snake_case : Optional[Any] = trainer.train(resume_from_checkpoint=__lowerCamelCase )
snake_case : int = train_result.metrics
snake_case : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCamelCase )
)
snake_case : str = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , __lowerCamelCase )
trainer.save_metrics("train" , __lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case : Tuple = trainer.evaluate(eval_dataset=__lowerCamelCase )
snake_case : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCamelCase )
snake_case : Union[str, Any] = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.log_metrics("eval" , __lowerCamelCase )
trainer.save_metrics("eval" , __lowerCamelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Removing the `label` column because it contains -1 and Trainer won't like that.
snake_case : Optional[Any] = predict_dataset.remove_columns("label" )
snake_case : List[str] = trainer.predict(__lowerCamelCase , metric_key_prefix="predict" ).predictions
snake_case : Optional[int] = np.argmax(__lowerCamelCase , axis=1 )
snake_case : Union[str, Any] = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(__lowerCamelCase ):
snake_case : int = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
snake_case : Any = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCamelCase )
else:
trainer.create_model_card(**__lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
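# A minimal, self-contained sketch of the table conversion performed inside
# `preprocess_tabfact_function` above. The sample `table_text` string is a
# hypothetical illustration, not taken from the TabFact data: rows are
# separated by "\n", cells by "#", and the first row is treated as the header.
import pandas as pd
table_text = "city#population\nParis#2161000\nBerlin#3645000"
rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
print(table)  # two data rows under the columns "city" and "population"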
| 10 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCamelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCamelCase = json.load(f)
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE (self : Dict , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : List[Any] = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Tuple , snake_case__ : Optional[int] ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = f"""facebook/wmt19-{pair}"""
snake_case : Optional[Any] = self.get_tokenizer(snake_case__ )
snake_case : Dict = self.get_model(snake_case__ )
snake_case : List[Any] = bleu_data[pair]["src"]
snake_case : int = bleu_data[pair]["tgt"]
snake_case : Union[str, Any] = tokenizer(snake_case__ , return_tensors="pt" , truncation=snake_case__ , padding="longest" ).to(snake_case__ )
snake_case : str = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
snake_case : Optional[int] = tokenizer.batch_decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ )
snake_case : Optional[int] = calculate_bleu(snake_case__ , snake_case__ )
print(snake_case__ )
self.assertGreaterEqual(scores["bleu"] , snake_case__ )
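# A hedged sketch of what a `calculate_bleu` helper like the one imported
# above might do, using sacrebleu; the actual `utils` module is not shown
# here, so treat this as an assumption rather than the real implementation.
import sacrebleu
def calculate_bleu_sketch(output_lns, refs_lns):
    # corpus-level BLEU between generated lines and one reference per line
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}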
| 10 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class a ( lowerCAmelCase_ ):
_snake_case : Union[str, Any] = 'deberta-v2'
def __init__( self : int , __lowerCAmelCase : str=12_8100 , __lowerCAmelCase : Optional[Any]=1536 , __lowerCAmelCase : str=24 , __lowerCAmelCase : Tuple=24 , __lowerCAmelCase : Optional[int]=6144 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : Tuple=1e-7 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[Any]=-1 , __lowerCAmelCase : str=0 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : Dict="gelu" , **__lowerCAmelCase : List[Any] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = relative_attention
_UpperCAmelCase = max_relative_positions
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = position_biased_input
# Backwards compatibility
if type(__lowerCAmelCase ) == str:
_UpperCAmelCase = [x.strip() for x in pos_att_type.lower().split("""|""" )]
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = vocab_size
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = kwargs.get("""pooler_hidden_size""" , __lowerCAmelCase )
_UpperCAmelCase = pooler_dropout
_UpperCAmelCase = pooler_hidden_act
class a ( lowerCAmelCase_ ):
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.task == "multiple-choice":
_UpperCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_UpperCAmelCase = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : "PreTrainedTokenizerBase" = None , ):
_UpperCAmelCase = super().generate_dummy_inputs(preprocessor=__lowerCAmelCase , framework=__lowerCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
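# Minimal usage sketch, assuming the standard `transformers` package is
# installed: the config class above mirrors DebertaV2Config, whose
# `model_type` is the "deberta-v2" literal set on the class.
from transformers import DebertaV2Config
cfg = DebertaV2Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
print(cfg.model_type)  # "deberta-v2"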
| 289 | """simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 3_2
def __UpperCAmelCase ( lowercase ,lowercase = 16 ):
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCAmelCase = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
_UpperCAmelCase = 8
else:
_UpperCAmelCase = None
return tokenizer.pad(
lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( lowercase ,lowercase ):
"""simple docstring"""
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1":
_UpperCAmelCase = 2
# Initialize accelerator
_UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["""lr"""]
_UpperCAmelCase = int(config["""num_epochs"""] )
_UpperCAmelCase = int(config["""seed"""] )
_UpperCAmelCase = int(config["""batch_size"""] )
_UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=lowercase )
def inner_training_loop(lowercase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
_UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase )
# Instantiate scheduler
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**lowercase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
_UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,lowercase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ):
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
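# Standalone sketch of the `find_executable_batch_size` pattern used above:
# the decorated function receives the current batch size as its first
# argument and is retried with a smaller one whenever it raises an
# out-of-memory error; the real retry logic lives in `accelerate.utils`.
from accelerate.utils import find_executable_batch_size
@find_executable_batch_size(starting_batch_size=128)
def training_loop_sketch(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders with `batch_size` and run the training steps ...
training_loop_sketch()  # called with no arguments; the decorator injects batch_size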
| 289 | 1 |
import math
import sys
import cva
import numpy as np
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> np.ndarray:
'''simple docstring'''
# Applies the Gaussian function elementwise to the matrix.
UpperCamelCase = math.sqrt(UpperCamelCase_ )
UpperCamelCase = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> np.ndarray:
'''simple docstring'''
# Creates a gaussian kernel of given dimension.
UpperCamelCase = np.zeros((kernel_size, kernel_size) )
for i in range(0 , UpperCamelCase_ ):
for j in range(0 , UpperCamelCase_ ):
UpperCamelCase = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(UpperCamelCase_ , UpperCamelCase_ )
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase = np.zeros(img.shape )
UpperCamelCase = get_gauss_kernel(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase , UpperCamelCase = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
UpperCamelCase = get_slice(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = img_s - img_s[kernel_size // 2, kernel_size // 2]
UpperCamelCase = vec_gaussian(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = np.multiply(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = np.multiply(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = np.sum(UpperCamelCase_ ) / np.sum(UpperCamelCase_ )
UpperCamelCase = val
return imga
def lowercase( UpperCamelCase_ ) -> tuple:
'''simple docstring'''
UpperCamelCase = args[1] if args[1:] else """../image_data/lena.jpg"""
UpperCamelCase = float(args[2] ) if args[2:] else 1.0
UpperCamelCase = float(args[3] ) if args[3:] else 1.0
if args[4:]:
UpperCamelCase = int(args[4] )
UpperCamelCase = kernel_size + abs(kernel_size % 2 - 1 )
else:
UpperCamelCase = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parse_args(sys.argv)
_SCREAMING_SNAKE_CASE = cva.imread(filename, 0)
cva.imshow("""input image""", img)
_SCREAMING_SNAKE_CASE = img / 2_5_5
_SCREAMING_SNAKE_CASE = out.astype("""float32""")
_SCREAMING_SNAKE_CASE = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
_SCREAMING_SNAKE_CASE = out * 2_5_5
_SCREAMING_SNAKE_CASE = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
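# A hedged numerical check of the spatial-kernel logic above: the distance
# matrix is zero at the center, so the Gaussian weight peaks in the middle.
# Unlike the code above, this sketch takes sigma directly instead of a variance.
import math
import numpy as np
def gauss_kernel_sketch(kernel_size: int, sigma: float) -> np.ndarray:
    dist = np.zeros((kernel_size, kernel_size))
    for i in range(kernel_size):
        for j in range(kernel_size):
            dist[i, j] = math.hypot(i - kernel_size // 2, j - kernel_size // 2)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((dist / sigma) ** 2) * 0.5)
print(gauss_kernel_sketch(3, 1.0))  # maximum at the central [1, 1] entry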
| 165 |
from math import pi
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> float:
'''simple docstring'''
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
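# Sanity check for the formula above, assuming the helper is exposed as
# `arc_length` as in the call just before this line: a 90-degree arc is a
# quarter circle, so for radius 10 the length is (2 * pi * 10) / 4 ~= 15.7079.
assert abs(arc_length(90, 10) - (2 * pi * 10) / 4) < 1e-9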
| 165 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_snake_case = logging.get_logger(__name__)
_snake_case = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
_snake_case = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
_snake_case = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class UpperCamelCase ( lowerCAmelCase__ ):
UpperCamelCase : Dict = """whisper"""
UpperCamelCase : str = ["""past_key_values"""]
UpperCamelCase : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Union[str, Any] , UpperCAmelCase__ : str=51865 , UpperCAmelCase__ : Union[str, Any]=80 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : int=1536 , UpperCAmelCase__ : Optional[Any]=1536 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Tuple=50257 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : int=256 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0_2 , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Any=1500 , UpperCAmelCase__ : str=448 , UpperCAmelCase__ : Optional[int]=50256 , UpperCAmelCase__ : Any=50256 , UpperCAmelCase__ : str=50256 , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]=[220, 50256] , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : str=256 , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : int=0.0_5 , UpperCAmelCase__ : Any=10 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=10 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : Tuple=7 , **UpperCAmelCase__ : Union[str, Any] , ) -> Tuple:
_a : Any = vocab_size
_a : Optional[Any] = num_mel_bins
_a : Optional[Any] = d_model
_a : List[str] = encoder_layers
_a : str = encoder_attention_heads
_a : Union[str, Any] = decoder_layers
_a : str = decoder_attention_heads
_a : Any = decoder_ffn_dim
_a : int = encoder_ffn_dim
_a : List[Any] = dropout
_a : int = attention_dropout
_a : Union[str, Any] = activation_dropout
_a : str = activation_function
_a : Optional[int] = init_std
_a : Tuple = encoder_layerdrop
_a : Optional[Any] = decoder_layerdrop
_a : Union[str, Any] = use_cache
_a : List[str] = encoder_layers
_a : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_a : Dict = max_source_positions
_a : Union[str, Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_a : Union[str, Any] = classifier_proj_size
_a : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a : Optional[int] = apply_spec_augment
_a : int = mask_time_prob
_a : List[str] = mask_time_length
_a : List[Any] = mask_time_min_masks
_a : str = mask_feature_prob
_a : Dict = mask_feature_length
_a : Optional[int] = mask_feature_min_masks
_a : str = median_filter_width
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , decoder_start_token_id=__a , suppress_tokens=__a , begin_suppress_tokens=__a , **__a , )
class UpperCamelCase ( lowerCAmelCase__ ):
@property
def _lowercase ( self : Dict ) -> List[Any]:
_a : List[Any] = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
_a : Any = {0: """batch"""}
else:
_a : str = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__a , direction="""inputs""" )
return common_inputs
def _lowercase ( self : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] = -1 , UpperCAmelCase__ : List[str] = -1 , UpperCAmelCase__ : str = False , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : Tuple = 22050 , UpperCAmelCase__ : Tuple = 5.0 , UpperCAmelCase__ : Any = 220 , ) -> Union[str, Any]:
_a : Dict = OrderedDict()
_a : List[str] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=__a , framework=__a , sampling_rate=__a , time_duration=__a , frequency=__a , )
_a : str = encoder_inputs["""input_features"""].shape[2]
_a : List[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
_a : Dict = super().generate_dummy_inputs(
preprocessor.tokenizer , __a , __a , __a , __a )
_a : Optional[Any] = encoder_inputs.pop("""input_features""" )
_a : List[str] = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
_a : Any = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _lowercase ( self : str ) -> Any:
return 1E-3
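# Minimal usage sketch, assuming the standard `transformers` package is
# installed: the config class above mirrors WhisperConfig, whose
# `model_type` is the "whisper" literal set on the class.
from transformers import WhisperConfig
cfg = WhisperConfig(d_model=256, encoder_layers=4, decoder_layers=4)
print(cfg.model_type)  # "whisper"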
| 294 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
A : Tuple = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
A : Optional[Any] = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" )
return sd
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=rename_keys_prefix ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
__lowerCAmelCase = key
for name_pair in rename_keys_prefix:
__lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
__lowerCAmelCase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`; it was added separately
__lowerCAmelCase = new_d["cls.predictions.bias"]
return new_d
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
__lowerCAmelCase = "pretraining"
if "vcr" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 512}
__lowerCAmelCase = "multichoice"
elif "vqa_advanced" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 2048}
__lowerCAmelCase = "vqa_advanced"
elif "vqa" in checkpoint_path:
__lowerCAmelCase = {"visual_embedding_dim": 2048, "num_labels": 3129}
__lowerCAmelCase = "vqa"
elif "nlvr" in checkpoint_path:
__lowerCAmelCase = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
__lowerCAmelCase = "nlvr"
__lowerCAmelCase = VisualBertConfig(**_UpperCamelCase )
# Load State Dict
__lowerCAmelCase = load_state_dict(_UpperCamelCase )
__lowerCAmelCase = get_new_dict(_UpperCamelCase , _UpperCamelCase )
if model_type == "pretraining":
__lowerCAmelCase = VisualBertForPreTraining(_UpperCamelCase )
elif model_type == "vqa":
__lowerCAmelCase = VisualBertForQuestionAnswering(_UpperCamelCase )
elif model_type == "nlvr":
__lowerCAmelCase = VisualBertForVisualReasoning(_UpperCamelCase )
elif model_type == "multichoice":
__lowerCAmelCase = VisualBertForMultipleChoice(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
# Save Checkpoints
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
A : Optional[int] = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
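# Tiny standalone check of the prefix-renaming idea used above: each
# (old, new) pair is applied in order to every state-dict key. The sample
# key below is a hypothetical illustration.
pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
key = "bert.bert.encoder.layer.0.attention.self.query.weight"
for old, new in pairs:
    key = key.replace(old, new)
print(key)  # visual_bert.encoder.layer.0.attention.self.query.weight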
| 57 | 0 |
from PIL import Image
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
def brightness(lowerCAmelCase__ ) -> float:
return 128 + level + (c - 128)
if not -2_55.0 <= level <= 2_55.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(lowerCAmelCase__ )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
lowercase__ :Optional[Any] = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
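# Worked check of the point transform above: 128 + level + (c - 128)
# simplifies to c + level, so with level=100 a mid-gray pixel maps to 228
# (values outside 0..255 would need clipping to fit an 8-bit image).
level = 100
for c in (0, 128, 255):
    print(c, "->", 128 + level + (c - 128))  # 100, 228, 355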
| 97 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Dict = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowercase__ :Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
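# A hedged, minimal illustration of the lazy-import idea behind `_LazyModule`
# above: heavy submodules are only imported when one of their attributes is
# first accessed. This is a sketch, not transformers' actual implementation.
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(submodule), attr)
                setattr(self, attr, value)  # cache for subsequent lookups
                return value
        raise AttributeError(attr)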
| 97 | 1 |
'''simple docstring'''
from typing import Any
class A__ :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Any ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = data
_UpperCAmelCase : int = None
def __repr__( self : Any ) -> Tuple:
"""simple docstring"""
return F"""Node({self.data})"""
class A__ :
"""simple docstring"""
def __init__( self : Dict ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : int = None
def __iter__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.head
while node:
yield node.data
_UpperCAmelCase : Union[str, Any] = node.next
def __len__( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self : Any ) -> Tuple:
"""simple docstring"""
return "->".join([str(lowerCAmelCase__ ) for item in self] )
def __getitem__( self : List[str] , lowerCAmelCase__ : int ) -> List[Any]:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Any ) -> List[Any]:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
_UpperCAmelCase : Tuple = self.head
for _ in range(lowerCAmelCase__ ):
_UpperCAmelCase : Union[str, Any] = current.next
_UpperCAmelCase : Tuple = data
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Any ) -> Any:
"""simple docstring"""
self.insert_nth(len(self ) , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Any ) -> int:
"""simple docstring"""
self.insert_nth(0 , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
_UpperCAmelCase : List[str] = Node(lowerCAmelCase__ )
if self.head is None:
_UpperCAmelCase : Optional[Any] = new_node
elif index == 0:
_UpperCAmelCase : Tuple = self.head # link new_node to head
_UpperCAmelCase : str = new_node
else:
_UpperCAmelCase : str = self.head
for _ in range(index - 1 ):
_UpperCAmelCase : Union[str, Any] = temp.next
_UpperCAmelCase : List[Any] = temp.next
_UpperCAmelCase : Dict = new_node
def _lowerCAmelCase ( self : int ) -> Union[str, Any]: # print every node data
"""simple docstring"""
print(self )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.delete_nth(0 )
def _lowerCAmelCase ( self : Any ) -> Optional[int]: # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : int = 0 ) -> Tuple:
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
_UpperCAmelCase : Dict = self.head # default first node
if index == 0:
_UpperCAmelCase : Union[str, Any] = self.head.next
else:
_UpperCAmelCase : int = self.head
for _ in range(index - 1 ):
_UpperCAmelCase : List[str] = temp.next
_UpperCAmelCase : int = temp.next
_UpperCAmelCase : Optional[Any] = temp.next.next
return delete_node.data
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return self.head is None
def _lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Tuple = self.head
while current:
# Store the current node's next node.
_UpperCAmelCase : Optional[int] = current.next
# Make the current node's next point backwards
_UpperCAmelCase : Optional[Any] = prev
# Make the previous node be the current node
_UpperCAmelCase : List[Any] = current
# Make the current node the next node (to progress iteration)
_UpperCAmelCase : Tuple = next_node
# Return prev in order to put the head at the end
_UpperCAmelCase : Any = prev
def __UpperCAmelCase ( ):
_UpperCAmelCase : Union[str, Any] = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowerCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowerCamelCase ) == i
linked_list.insert_nth(_lowerCamelCase, i + 1 )
assert str(_lowerCamelCase ) == "->".join(str(_lowerCamelCase ) for i in range(1, 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowerCamelCase ) == "->".join(str(_lowerCamelCase ) for i in range(0, 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowerCamelCase ) == 9
assert str(_lowerCamelCase ) == "->".join(str(_lowerCamelCase ) for i in range(1, 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0, 9 ) ) is True
for i in range(0, 9 ):
_UpperCAmelCase : List[Any] = -i
assert all(linked_list[i] == -i for i in range(0, 9 ) ) is True
linked_list.reverse()
assert str(_lowerCamelCase ) == "->".join(str(_lowerCamelCase ) for i in range(-8, 1 ) )
def __UpperCAmelCase ( ):
_UpperCAmelCase : str = [
-9,
100,
Node(77_345_112 ),
"""dlrow olleH""",
7,
5_555,
0,
-192.55_555,
"""Hello, world!""",
77.9,
Node(10 ),
None,
None,
12.20,
]
_UpperCAmelCase : Any = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowerCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowerCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_UpperCAmelCase : Optional[Any] = linked_list.delete_head()
assert result == -9
assert (
str(_lowerCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_UpperCAmelCase : Tuple = linked_list.delete_tail()
assert result == 12.2
assert (
str(_lowerCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_UpperCAmelCase : Any = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowerCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(_lowerCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowerCamelCase )
assert (
str(_lowerCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowerCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __UpperCAmelCase ( ):
from doctest import testmod
testmod()
_UpperCAmelCase : List[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(_lowerCamelCase )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
_UpperCAmelCase : Dict = input("Enter New Value: " ).strip()
print("New list:" )
print(_lowerCamelCase )
print(f"""length of linked_list is : {len(_lowerCamelCase )}""" )
if __name__ == "__main__":
main()
| 145 |
'''simple docstring'''
def lowerCAmelCase_ ( _lowerCamelCase: list ):
if any(not isinstance(x , int ) or x < 0 for x in sequence ):
raise TypeError("""Sequence must be list of non-negative integers""" )
for _ in range(len(_lowerCamelCase ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(_lowerCamelCase , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 112 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
__A : Any = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
__A : int = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
__A : str = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def a__ ( self :int ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) ,codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] ,reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] ,)
def a__ ( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ):
snake_case_ : List[str] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
snake_case_ : Union[str, Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
snake_case_ : Optional[int] = evaluate(dataset=_UpperCamelCase ,predictions=_UpperCamelCase )
return score
| 364 |
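# Usage sketch for the SQuAD metric defined above, taken from its own
# _KWARGS_DESCRIPTION docstring (assumes the `datasets` package is installed):
import datasets
squad_metric = datasets.load_metric("squad")
predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
print(squad_metric.compute(predictions=predictions, references=references))
# {'exact_match': 100.0, 'f1': 100.0}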
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : int = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = ['''model.decoder.embed_positions.weights''']
def a__ ( snake_case ):
"""simple docstring"""
if "emb" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
__SCREAMING_SNAKE_CASE : int = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
__SCREAMING_SNAKE_CASE : str = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
__SCREAMING_SNAKE_CASE : Dict = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name

def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq MusicGen state dict to HF conventions and split off the
    encoder-decoder projection weights, which live on the composite model."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projection weights
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
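
# The fused in_proj_weight has shape (3 * hidden_size, hidden_size), so the three
# hidden_size-row slices above are exactly the q/k/v projection matrices.
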

def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config

@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass: batch of 2 text prompts, 4 codebooks per sample
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
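
# Example invocation (script name and output path are illustrative):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small
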
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
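
# Each Miller-Rabin round rejects a composite n with probability at least 3/4, so the
# default prec=1000 independent rounds bound the false-positive rate by 4**-1000.
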
"""
Project Euler Problem 9: https://projecteuler.net/problem=9

There exists exactly one Pythagorean triplet (a, b, c) for which a + b + c = 1000.
Find the product a * b * c.
"""


def solution():
    """Return a * b * c for the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)

def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into a single dataset of the same kind."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
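
# Minimal usage sketch (dataset names are illustrative):
#   from datasets import load_dataset, interleave_datasets
#   d1 = load_dataset("user/corpus_a", split="train")
#   d2 = load_dataset("user/corpus_b", split="train")
#   mixed = interleave_datasets([d1, d2], probabilities=[0.7, 0.3], seed=42)
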

def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets along rows (axis=0) or columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
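
# Sketch of the two axes (d1/d2 are illustrative):
#   rows = concatenate_datasets([d1, d2])           # axis=0: append examples
#   wide = concatenate_datasets([d1, d2], axis=1)   # axis=1: join columns, assumes len(d1) == len(d2)
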