'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER) but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N = S + D + C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
'''
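# Illustrative sketch, not part of the original metric (`cer_from_counts` is a hypothetical
# helper): the formula above computed directly from edit-operation counts. With S=3, D=2,
# I=1 and C=44 correct characters, CER = 6 / 49.
def cer_from_counts(substitutions: int, deletions: int, insertions: int, hits: int) -> float:
    return (substitutions + deletions + insertions) / (substitutions + deletions + hits)


assert cer_from_counts(3, 2, 1, 44) == 6 / 49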
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
    (float): the character error rate
Examples:
    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        # Outer product of the two validity masks gives a (batch, 1, query_len, key_len) attention mask.
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
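    # Illustrative note (not from the original file): at inference the decoder consumes
    # (encoder_hidden_states, encoder_mask) pairs from the upstream encoders, continuous
    # spectrogram tokens of shape (batch, targets_length, input_dims) and one noise time per
    # sample in [0, 1); spec_out projects back to input_dims, so the output matches the
    # shape of decoder_input_tokens.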
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # Gated GELU: one projection is activated, the other gates it multiplicatively.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
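# Illustrative sketch, not from the original file: a compact reference implementation of the
# same RMS normalization; for float32 inputs it agrees with T5LayerNorm up to numerical precision.
def _rms_norm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    return weight * x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)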
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
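# Illustrative sketch, not from the original file: PyTorch ships the same tanh approximation,
# so the activation above can be sanity-checked against the built-in (torch >= 1.12):
def _check_new_gelu():
    x = torch.randn(8)
    assert torch.allclose(NewGELUActivation()(x), nn.functional.gelu(x, approximate="tanh"), atol=1e-6)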
class T5FiLMLayer(nn.Module):
    """
    FiLM Layer
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
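# Illustrative sketch, not from the original file: FiLM projects the conditioning embedding
# to a per-channel (scale, shift) pair and applies x * (1 + scale) + shift. Shapes are examples.
def _film_demo():
    film = T5FiLMLayer(in_features=3072, out_features=768)  # d_model * 4 -> d_model
    x = torch.randn(2, 16, 768)  # (batch, seq_len, d_model)
    cond = torch.randn(2, 1, 3072)  # conditioning embedding, broadcast over seq_len
    assert film(x, cond).shape == x.shape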
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]
if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
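# Illustrative note (not from the original file): _LazyModule defers the heavy framework
# imports declared in _import_structure until an attribute is first accessed, e.g.:
#
#     from transformers.models import whisper
#     config = whisper.WhisperConfig()  # only now is configuration_whisper actually imported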
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate a training command from provided command line arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformer-cli."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
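    # Illustrative sketch (hypothetical invocation, not from the original file): once
    # registered, the command is reachable through the CLI entry point, e.g.:
    #
    #   transformers-cli train --train_data train.csv --output ./trained --task text_classification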
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def UpperCamelCase__ ( self , __A = "auto" ) -> List[str]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__A )
def UpperCamelCase__ ( self ) -> int:
self.enable_attention_slicing(__A )
@torch.no_grad()
def __call__( self , __A , __A = 512 , __A = 512 , __A = 50 , __A = 7.5 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , __A = None , **__A , ) -> Tuple:
if isinstance(__A , __A ):
_lowerCAmelCase =1
elif isinstance(__A , __A ):
_lowerCAmelCase =len(__A )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(__A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(__A )}.''' )
# get prompt text embeddings
_lowerCAmelCase =self.tokenizer(
__A , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_lowerCAmelCase =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_lowerCAmelCase =text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_lowerCAmelCase =self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =text_embeddings.shape
_lowerCAmelCase =text_embeddings.repeat(1 , __A , 1 )
_lowerCAmelCase =text_embeddings.view(bs_embed * num_images_per_prompt , __A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase =42
if negative_prompt is None:
_lowerCAmelCase =['']
elif type(__A ) is not type(__A ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(__A )} !='''
F''' {type(__A )}.''' )
elif isinstance(__A , __A ):
_lowerCAmelCase =[negative_prompt]
elif batch_size != len(__A ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(__A )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
_lowerCAmelCase =negative_prompt
_lowerCAmelCase =text_input_ids.shape[-1]
_lowerCAmelCase =self.tokenizer(
__A , padding='max_length' , max_length=__A , truncation=__A , return_tensors='pt' , )
_lowerCAmelCase =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase =uncond_embeddings.shape[1]
_lowerCAmelCase =uncond_embeddings.repeat(__A , __A , 1 )
_lowerCAmelCase =uncond_embeddings.view(batch_size * num_images_per_prompt , __A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase =(batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_lowerCAmelCase =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase =torch.randn(
__A , generator=__A , device='cpu' , dtype=__A ).to(self.device )
_lowerCAmelCase =torch.randn(__A , generator=__A , device='cpu' , dtype=__A ).to(
self.device )
else:
_lowerCAmelCase =torch.randn(
__A , generator=__A , device=self.device , dtype=__A )
_lowerCAmelCase =torch.randn(__A , generator=__A , device=self.device , dtype=__A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_lowerCAmelCase =latents_reference.to(self.device )
_lowerCAmelCase =latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_lowerCAmelCase =(latents_shape[3] - latents_shape_reference[3]) // 2
_lowerCAmelCase =(latents_shape[2] - latents_shape_reference[2]) // 2
_lowerCAmelCase =latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_lowerCAmelCase =latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_lowerCAmelCase =0 if dx < 0 else dx
_lowerCAmelCase =0 if dy < 0 else dy
_lowerCAmelCase =max(-dx , 0 )
_lowerCAmelCase =max(-dy , 0 )
_lowerCAmelCase =latents_reference[:, :, dy : dy + h, dx : dx + w]
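# Illustrative note (editor's reading of the block above, not from the original file): dx/dy
# are half the width/height differences between the new latents and the reference latents;
# keeping the centred overlap lets two generations that share a seed but differ in resolution
# share their central noise, so the resulting images look alike.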
# set timesteps
self.scheduler.set_timesteps(__A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCAmelCase =self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase ='eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase ={}
if accepts_eta:
_lowerCAmelCase =eta
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase =self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
_lowerCAmelCase =self.unet(__A , __A , encoder_hidden_states=__A ).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase =noise_pred.chunk(2 )
_lowerCAmelCase =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase =self.scheduler.step(__A , __A , __A , **__A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A , __A )
_lowerCAmelCase =1 / 0.18_215 * latents
_lowerCAmelCase =self.vae.decode(__A ).sample
_lowerCAmelCase =(image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_lowerCAmelCase =self.feature_extractor(self.numpy_to_pil(__A ) , return_tensors='pt' ).to(
self.device )
_lowerCAmelCase , _lowerCAmelCase =self.safety_checker(
images=__A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_lowerCAmelCase =None
if output_type == "pil":
_lowerCAmelCase =self.numpy_to_pil(__A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__A , nsfw_content_detected=__A )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    # `field(default=[...])` would share one mutable default across instances, so route it
    # through default_factory instead.
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
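# Illustrative sketch (assumed invocation, not from the original file): the script expects a
# csv with columns model,batch_size,sequence_length,result, as read in Plot.__init__ above:
#
#   python plot_csv_file.py --csv_file results.csv --figure_png_file memory.png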
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
parser = argparse.ArgumentParser(
    description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
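# Illustrative check (not from the original file): the loop peels off one decimal digit per
# iteration; for example 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26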
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
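# Illustrative sketch (not part of the original script): the "init" pattern in action.
_re_init, _init_repl = REPLACE_PATTERNS["init"]
assert _re_init.sub(_init_repl.replace("VERSION", "1.2.3"), '__version__ = "1.2.2.dev0"') == '__version__ = "1.2.3"\n'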
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (an adjacency-list dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports True as soon as a back edge is found."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
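# Illustrative check (not from the original file): 0 -> 1 -> 2 -> 3 is acyclic; adding the
# back edge 3 -> 1 creates a cycle.
assert check_cycle({0: [1], 1: [2], 2: [3], 3: []}) is False
assert check_cycle({0: [1], 1: [2], 2: [3], 3: [1]}) is True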
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
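# Example invocation (file name and paths below are illustrative assumptions):
#   python convert_mobilenet_v1_checkpoint.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt --pytorch_dump_folder_path ./dump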
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = 'blip_2_vision_model'

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16,
                 image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=0.00001, attention_dropout=0.0,
                 initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == 'blip-2':
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = 'blip_2_qformer'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
                 position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == 'blip-2':
            config_dict = config_dict['qformer_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = 'blip-2'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        '''Instantiate a Blip2Config from separate vision, Q-Former and language model configs.'''
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
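# A minimal composition sketch (values are illustrative): build a BLIP-2 config
# from default sub-configs, overriding only the language-model type.
#
#     config = Blip2Config(text_config={'model_type': 'opt'}, num_query_tokens=32)
#     config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size  # True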
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit=1_000_000, n_limit=10):
    '''
    Count the tile totals t <= t_limit that can form a hollow square lamina in
    between 1 and n_limit distinct ways, where t = outer_width**2 - hole_width**2.
    '''
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # hole and outer widths must share parity so the border has uniform thickness
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
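# Worked example: the smallest lamina uses 8 tiles (outer_width=3, hole_width=1,
# since 3*3 - 1*1 = 8), so count[8] is incremented on the very first outer loop pass.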
if __name__ == "__main__":
print(F'{solution() = }')
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message):
    '''Translate a plaintext message into Morse code.'''
    return ' '.join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message):
    '''Translate a Morse-code message back into plaintext.'''
    return ''.join(REVERSE_DICT[char] for char in message.split())
def main():
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = 'Hello world! cécé herlolip'

BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    '''Copy/paste and tweak the pre-trained weights to our BertAbs structure.'''
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir='.',
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder='bert',
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device('cpu'), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device('cpu'))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info('convert the model')
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ------------------------------------
    # Make sure the outputs are identical
    # ------------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print('Maximum absolute difference between model outputs: {:.2f}'.format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info('all weights are equal up to 1e-3')
    else:
        raise ValueError('the weights are different. The new model is likely different from the original one.')

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin'
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
        help='''Path to the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True,
                 classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
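# The `inputs` property above is what the ONNX export machinery queries to declare
# dynamic axes: for the default task both input_ids and attention_mask map to
# {0: 'batch', 1: 'sequence'}, so batch size and sequence length stay flexible.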
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n, k):
    '''Return every k-element combination of the integers 1..n.'''
    result = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment, total_number, level, current_list, total_list):
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list):
    for i in total_list:
        print(*i)
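# For n=4, k=2 this yields, in order: 1 2, 1 3, 1 4, 2 3, 2 4, 3 4.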
if __name__ == "__main__":
lowercase_ = 4
lowercase_ = 2
lowercase_ = generate_all_combinations(n, k)
print_all_state(total_list)
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # Stage I (text-to-image) and stage II (super-resolution) pipelines
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda')

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('anime turtle', device='cuda')

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy'
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy'
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy'
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device='cpu').manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy'
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy'
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy'
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line):
    '''Return the indentation (leading whitespace) of `line`.'''
    search = _re_indent.search(line)
    return '' if search is None else search.groups()[0]
def UpperCamelCase__ ( a__ , a__="" , a__=None , a__=None ):
'''simple docstring'''
_lowerCAmelCase =0
_lowerCAmelCase =code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(a__ ):
index += 1
_lowerCAmelCase =['\n'.join(lines[:index] )]
else:
_lowerCAmelCase =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase =[lines[index]]
index += 1
while index < len(a__ ) and (end_prompt is None or not lines[index].startswith(a__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(a__ ) )
if index < len(a__ ) - 1:
_lowerCAmelCase =[lines[index + 1]]
index += 1
else:
_lowerCAmelCase =[]
else:
blocks.append('\n'.join(a__ ) )
_lowerCAmelCase =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a__ ) > 0:
blocks.append('\n'.join(a__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a__ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    '''Wrap a key function so that comparisons ignore case and underscores.'''

    def _inner(x):
        return key(x).lower().replace('_', '')

    return _inner
def sort_objects(objects, key=None):
    '''Sort a list of imported objects: constants first, then classes, then functions.'''

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    '''Sort the imported objects inside a single `_import_structure` statement.'''

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if ',' not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return '[' + ', '.join([f'"{k}"' for k in sort_objects(keys)]) + ']'

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return '\n'.join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'"{k}"' for k in sort_objects(keys)])
        return '\n'.join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
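# Example: sort_objects_in_import('_import_structure["models"] = ["zeta", "Beta", "ALPHA"]')
# returns the same statement reordered as ["ALPHA", "Beta", "zeta"]: constants first,
# then classes, then functions, each group alphabetized ignoring underscores.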
def sort_imports(file, check_only=True):
    '''Sort the `_import_structure` blocks of one init file (or just report that it needs sorting).'''
    with open(file, 'r') as f:
        code = f.read()

    if '_import_structure' not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:'
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and '_import_structure' not in block_lines[line_idx]:
            # Skip dummy import blocks
            if 'import dummy' in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != '\n'.join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    '''Run `sort_imports` on every `__init__.py` under the diffusers source tree.'''
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if '__init__.py' in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
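        # The optimal pick is the 100- and 120-value items: weights 20 + 30 exactly fill the capacity of 50.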
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
        'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DPTForDepthEstimation',
        'DPTForSemanticSegmentation',
        'DPTModel',
        'DPTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
'''simple docstring'''
import numpy as np
def sigmoid(vector):
    '''Apply the logistic function 1 / (1 + e^(-x)) elementwise to `vector`.'''
    return 1 / (1 + np.exp(-vector))
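# e.g. sigmoid(0.0) == 0.5; the output approaches 0 and 1 as the input goes to -inf and +inf.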
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz',
            extract_compressed_file=True,
        )
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
    @slow
    @require_torch_gpu
    def test_model_download(self):
        '''Warm up the model cache so the next test does not include download time.'''
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            '$MAX_LEN': 64,
            '$BS': 64,
            '$GAS': 1,
            '$ENRO_DIR': self.data_dir,
            'facebook/mbart-large-cc25': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '--learning_rate=3e-5': '--learning_rate 3e-4',
            '--num_train_epochs 6': '--num_train_epochs 1',
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py')[1].strip()
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'''
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        '''.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['finetune.py'] + bash_script.split() + args
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        self.assertEqual(len(metrics['val']), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'], float)
        self.assertGreater(last_step_stats['val_avg_gen_time'], 0.01)
        # model hanging on generate. Maybe bad config was saved.
        self.assertLessEqual(last_step_stats['val_avg_gen_time'], 1.0)
        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats['val_avg_bleu'], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu']), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith('.ckpt')][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt['state_dict']
        assert ckpt['state_dict']['model.model.decoder.layers.0.encoder_attn_layer_norm.weight'].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert 'test_generations.txt' in contents
            assert 'test_results.txt' in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test']) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f'{self.test_file_dir_str}/test_data/wmt_en_ro'
        env_vars_to_replace = {
            '--fp16_opt_level=O1': '',
            '$MAX_LEN': 128,
            '$BS': 16,
            '$GAS': 1,
            '$ENRO_DIR': data_dir,
            '$m': 'sshleifer/student_marian_en_ro_6_1',
            'val_check_interval=0.25': 'val_check_interval=1.0',
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py')[1].strip()
        )
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        bash_script = bash_script.replace('--fp16 ', ' ')

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('--fp16', '')
        epochs = 6
        testargs = (
            ['distillation.py']
            + bash_script.split()
            + [
                f'--output_dir={output_dir}',
                '--gpus=1',
                '--learning_rate=1e-3',
                f'--num_train_epochs={epochs}',
                '--warmup_steps=10',
                '--val_check_interval=1.0',
                '--do_predict',
            ]
        )
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        assert len(metrics['val']) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats['val_avg_gen_time'] >= 0.01
        assert first_step_stats['val_avg_bleu'] < last_step_stats['val_avg_bleu']  # fails if the model learned nothing
        assert 1.0 >= last_step_stats['val_avg_gen_time']  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith('.ckpt')][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt['state_dict']
        assert ckpt['state_dict']['model.model.decoder.layers.0.encoder_attn_layer_norm.weight'].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert 'test_generations.txt' in contents
            assert 'test_results.txt' in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test']) == 1
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = 'sequence-classification'

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}

        if self.config.model_type not in ['distilbert', 'bart']:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {'loss': loss, 'log': tensorboard_logs}
    def prepare_data(self):
        '''Called to initialize data. Use the call to construct features and cache them.'''
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ['train', 'dev']:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        '''Load a cached feature set and wrap it in a DataLoader.'''
        # We test on the dev set to avoid test-set submission/postprocessing issues.
        mode = 'dev' if mode == 'test' else mode

        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == 'classification':
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == 'regression':
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}

        if self.config.model_type not in ['distilbert', 'bart']:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()

        return {'val_loss': tmp_eval_loss.detach().cpu(), 'pred': preds, 'target': out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == 'classification':
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == 'regression':
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret['log'] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['log']
        return {'val_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {'avg_test_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length',
            default=128,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument('--task', default='', type=str, required=True, help='The GLUE task to run')
        parser.add_argument(
            '--gpus',
            default=0,
            type=int,
            help='The number of GPUs allocated for this, it is by default 0 meaning none',
        )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results',
            f'{args.task}_{time.strftime("%Y%m%d_%H%M%S")}',
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None) -> int:
    '''
    Return the minimum number of moves needed so that every node of the binary
    tree holds exactly one coin, moving one coin along one edge per move.
    The total number of coins must equal the number of nodes.
    '''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        coins_distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_distrib_moves, coins_distrib_excess)

    return get_distrib(root)[0]
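# Usage sketch: a root holding 3 coins over two empty leaves needs two moves,
# one to each child: distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2.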
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        '''Arg num_of_nodes: the number of nodes in the graph.'''
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        '''Add an edge (u_node, v_node, weight) to the graph.'''
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        '''Follow parent pointers to the root of u_node's component.'''
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
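# Illustrative usage (a sketch; the obfuscated methods above map to add_edge
# and boruvka in the original Boruvka's-MST implementation):
#   g = Graph(3)
#   g.add_edge(0, 1, 1); g.add_edge(0, 2, 2); g.add_edge(1, 2, 3)
#   g.boruvka()  # picks the edges of weight 1 and 2, total MST weight 3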
| 58
| 0
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
return (-y * np.log(a__ ) - (1 - y) * np.log(1 - h )).mean()
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =np.dot(a__ , a__ )
return np.sum(y * scores - np.log(1 + np.exp(a__ ) ) )
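# Quick sanity checks (a sketch): sigmoid_function(0) == 0.5, and the
# cross-entropy cost above vanishes as predictions approach the labels, e.g.
#   cost_function(np.array([0.99]), np.array([1.0]))  # ~0.01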
def UpperCamelCase__ ( a__ , a__ , a__ , a__=7_0_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =np.zeros(x.shape[1] )
for iterations in range(a__ ):
_lowerCAmelCase =np.dot(a__ , a__ )
_lowerCAmelCase =sigmoid_function(a__ )
_lowerCAmelCase =np.dot(x.T , h - y ) / y.size
_lowerCAmelCase =theta - alpha * gradient # updating the weights
_lowerCAmelCase =np.dot(a__ , a__ )
_lowerCAmelCase =sigmoid_function(a__ )
_lowerCAmelCase =cost_function(a__ , a__ )
if iterations % 1_0_0 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
lowercase_ = datasets.load_iris()
lowercase_ = iris.data[:, :2]
lowercase_ = (iris.target != 0) * 1
lowercase_ = 0.1
lowercase_ = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return sigmoid_function(
np.dot(a__ , a__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((lowercase_) , (lowercase_)) = (x[:, 0].min(), x[:, 0].max())
((lowercase_) , (lowercase_)) = (x[:, 1].min(), x[:, 1].max())
((lowercase_) , (lowercase_)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
lowercase_ = np.c_[xxa.ravel(), xxa.ravel()]
lowercase_ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
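# Note: brightness(c) algebraically reduces to c + level, and Image.point
# clips each resulting pixel to the valid 0-255 range.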
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
bright_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 0
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = 'owlvit_text_model'
def __init__( self , __A=4_9408 , __A=512 , __A=2048 , __A=12 , __A=8 , __A=16 , __A="quick_gelu" , __A=1E-5 , __A=0.0 , __A=0.02 , __A=1.0 , __A=0 , __A=4_9406 , __A=4_9407 , **__A , ) -> Any:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_act
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =initializer_range
_lowerCAmelCase =initializer_factor
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
_lowerCAmelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'owlvit_vision_model'
def __init__( self , __A=768 , __A=3072 , __A=12 , __A=12 , __A=3 , __A=768 , __A=32 , __A="quick_gelu" , __A=1E-5 , __A=0.0 , __A=0.02 , __A=1.0 , **__A , ) -> Dict:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =initializer_range
_lowerCAmelCase =initializer_factor
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'owlvit'
lowercase : Optional[int] = True
def __init__( self , __A=None , __A=None , __A=512 , __A=2.6_592 , __A=True , **__A , ) -> Optional[int]:
super().__init__(**__A )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.' )
_lowerCAmelCase =OwlViTTextConfig(**__A )
_lowerCAmelCase =OwlViTVisionConfig(**__A )
_lowerCAmelCase =projection_dim
_lowerCAmelCase =logit_scale_init_value
_lowerCAmelCase =return_dict
_lowerCAmelCase =1.0
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
@classmethod
def UpperCamelCase__ ( cls , __A , __A , **__A ) -> Union[str, Any]:
_lowerCAmelCase ={}
_lowerCAmelCase =text_config
_lowerCAmelCase =vision_config
return cls.from_dict(__A , **__A )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def UpperCamelCase__ ( self ) -> float:
return 1E-4
def UpperCamelCase__ ( self , __A , __A = -1 , __A = -1 , __A = None , ) -> Mapping[str, Any]:
_lowerCAmelCase =super().generate_dummy_inputs(
processor.tokenizer , batch_size=__A , seq_length=__A , framework=__A )
_lowerCAmelCase =super().generate_dummy_inputs(
processor.image_processor , batch_size=__A , framework=__A )
return {**text_input_dict, **image_input_dict}
@property
def UpperCamelCase__ ( self ) -> int:
return 14
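# Illustrative usage (a sketch; the classes above mirror OwlViTTextConfig,
# OwlViTVisionConfig and OwlViTConfig from transformers):
#   text_cfg = OwlViTTextConfig()
#   vision_cfg = OwlViTVisionConfig()
#   cfg = OwlViTConfig.from_text_vision_configs(text_cfg, vision_cfg)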
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
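# update_from_string (exercised above) splits the string on commas, then on
# '=', and coerces each value to the type of the existing attribute, e.g.:
#   c = GPTaConfig()
#   c.update_from_string('n_embd=100,scale_attn_weights=false')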
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
lowercase_ = {
'''gpt-neox-20b''': 2048,
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = VOCAB_FILES_NAMES
lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Tuple = ['input_ids', 'attention_mask']
def __init__( self , __A=None , __A=None , __A=None , __A="<|endoftext|>" , __A="<|endoftext|>" , __A="<|endoftext|>" , __A=False , **__A , ) -> List[Any]:
super().__init__(
__A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , add_prefix_space=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __A ) != add_prefix_space:
_lowerCAmelCase =getattr(__A , pre_tok_state.pop('type' ) )
_lowerCAmelCase =add_prefix_space
_lowerCAmelCase =pre_tok_class(**__A )
_lowerCAmelCase =add_prefix_space
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def UpperCamelCase__ ( self , __A ) -> List[int]:
_lowerCAmelCase =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
_lowerCAmelCase =input_ids[-self.model_max_length :]
return input_ids
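# Illustrative usage (a sketch; the class above mirrors GPTNeoXTokenizerFast):
#   tok = GPTNeoXTokenizerFast.from_pretrained('EleutherAI/gpt-neox-20b')
#   tok('hello world')['input_ids']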
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each bucket's contents back into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
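# Worked example (a sketch; the function above is an LSD radix sort with
# RADIX == 10):
#   UpperCamelCase__([170, 45, 75, 90, 802, 24, 2, 66])
#   # -> [2, 24, 45, 66, 75, 90, 170, 802]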
| 58
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = 'gptj'
lowercase : Tuple = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __A=5_0400 , __A=2048 , __A=4096 , __A=28 , __A=16 , __A=64 , __A=None , __A="gelu_new" , __A=0.0 , __A=0.0 , __A=0.0 , __A=1E-5 , __A=0.02 , __A=True , __A=5_0256 , __A=5_0256 , __A=False , **__A , ) -> Union[str, Any]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =n_positions
_lowerCAmelCase =n_embd
_lowerCAmelCase =n_layer
_lowerCAmelCase =n_head
_lowerCAmelCase =n_inner
_lowerCAmelCase =rotary_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =resid_pdrop
_lowerCAmelCase =embd_pdrop
_lowerCAmelCase =attn_pdrop
_lowerCAmelCase =layer_norm_epsilon
_lowerCAmelCase =initializer_range
_lowerCAmelCase =use_cache
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =eos_token_id
super().__init__(
bos_token_id=__A , eos_token_id=__A , tie_word_embeddings=__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A , __A = "default" , __A = None , __A = False , ) -> Dict:
super().__init__(__A , task=__A , patching_specs=__A , use_past=__A )
if not getattr(self._config , 'pad_token_id' , __A ):
# TODO: how to do that better?
_lowerCAmelCase =0
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
_lowerCAmelCase =OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(__A , direction='inputs' )
_lowerCAmelCase ={0: 'batch', 1: 'past_sequence + sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCamelCase__ ( self ) -> int:
return self._config.n_layer
@property
def UpperCamelCase__ ( self ) -> int:
return self._config.n_head
def UpperCamelCase__ ( self , __A , __A = -1 , __A = -1 , __A = False , __A = None , ) -> Mapping[str, Any]:
_lowerCAmelCase =super(__A , self ).generate_dummy_inputs(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
# We need to order the inputs in the way they appear in forward()
_lowerCAmelCase =OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase , _lowerCAmelCase =common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowerCAmelCase =seqlen + 2
_lowerCAmelCase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCAmelCase =[
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
_lowerCAmelCase =common_inputs['attention_mask']
if self.use_past:
_lowerCAmelCase =ordered_inputs['attention_mask'].dtype
_lowerCAmelCase =torch.cat(
[ordered_inputs['attention_mask'], torch.ones(__A , __A , dtype=__A )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase__ ( self ) -> int:
return 13
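# Illustrative usage (a sketch; the classes above mirror GPTJConfig and its
# ONNX export config in transformers):
#   cfg = GPTJConfig(n_layer=2)
#   onnx_cfg = GPTJOnnxConfig(cfg, use_past=True)
#   list(onnx_cfg.inputs)  # input_ids, past_key_values.*, attention_mask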
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. Initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
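# Illustrative usage (a sketch; the classes above mirror Blip2VisionConfig,
# Blip2QFormerConfig and Blip2Config):
#   cfg = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig())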
| 704
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
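# Worked example (a sketch; note the recursive calls still reference the
# original name `peak`, so the def above would need that name to run):
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1])  # -> 5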
| 58
| 0
|
import numpy
# List of input, output pairs
lowercase_ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowercase_ = (((515, 22, 13), 555), ((61, 35, 49), 150))
lowercase_ = [2, 4, 1, 5]
lowercase_ = len(train_data)
lowercase_ = 0.009
def UpperCamelCase__ ( a__ , a__="train" ):
'''simple docstring'''
return calculate_hypothesis_value(a__ , a__ ) - output(
a__ , a__ )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =0
for i in range(len(a__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCamelCase__ ( a__ , a__=m ):
'''simple docstring'''
_lowerCAmelCase =0
for i in range(a__ ):
if index == -1:
summation_value += _error(a__ )
else:
summation_value += _error(a__ ) * train_data[i][0][index]
return summation_value
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =summation_of_cost_derivative(a__ , a__ ) / m
return cost_derivative_value
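# The derivative above implements batch gradient descent on squared error:
#   theta[i] <- theta[i] - LEARNING_RATE * (1/m) * sum_j error(j) * x_j[i]
# where index == -1 selects the bias term (implicit feature value 1).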
def UpperCamelCase__ ( ):
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
_lowerCAmelCase =0.000_002
_lowerCAmelCase =0
_lowerCAmelCase =0
while True:
j += 1
_lowerCAmelCase =[0, 0, 0, 0]
for i in range(0 , len(a__ ) ):
_lowerCAmelCase =get_cost_derivative(i - 1 )
_lowerCAmelCase =(
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
a__ , a__ , atol=a__ , rtol=a__ , ):
break
_lowerCAmelCase =temp_parameter_vector
print(('Number of iterations:', j) )
def UpperCamelCase__ ( ):
'''simple docstring'''
for i in range(len(a__ ) ):
print(('Actual output value:', output(a__ , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(a__ , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
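# Illustrative usage (a sketch; the class above mirrors ConvBertTokenizerFast):
#   tok = ConvBertTokenizerFast.from_pretrained('YituTech/conv-bert-base')
#   tok('hello world')['input_ids']  # [CLS] ... [SEP], token_type_ids all 0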
| 58
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
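# Illustrative usage (a sketch; the class above mirrors CLIPProcessor):
#   processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
#   inputs = processor(text=['a cat'], images=image, return_tensors='pt')
#   # -> BatchEncoding with input_ids, attention_mask and pixel_values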
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 58
| 0
|
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =int(a__ )
assert noofclusters < len(a__ )
# Find out the dimensionality
_lowerCAmelCase =len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase =list(range(len(a__ ) ) )
shuffle(a__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase =tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase =tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase =[
tf.Variable(vectors[vector_indices[i]] ) for i in range(a__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase =tf.placeholder('float64' , [dim] )
_lowerCAmelCase =[]
for centroid in centroids:
cent_assigns.append(tf.assign(a__ , a__ ) )
##Variables for cluster assignments of individual vectors (initialized
##to 0 at first)
_lowerCAmelCase =[tf.Variable(0 ) for i in range(len(a__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase =tf.placeholder('int32' )
_lowerCAmelCase =[]
for assignment in assignments:
cluster_assigns.append(tf.assign(a__ , a__ ) )
##Now let's construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase =tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase =tf.reduce_mean(a__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase =tf.placeholder('float' , [dim] )
_lowerCAmelCase =tf.placeholder('float' , [dim] )
_lowerCAmelCase =tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(a__ , a__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase =tf.placeholder('float' , [noofclusters] )
_lowerCAmelCase =tf.argmin(a__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase =tf.initialize_all_variables()
# Initialize all variables
sess.run(a__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase =1_0_0
for _ in range(a__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(a__ ) ):
_lowerCAmelCase =vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase =[
sess.run(a__ , feed_dict={va: vect, va: sess.run(a__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase =sess.run(
a__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(a__ ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase =[
vectors[i]
for i in range(len(a__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase =sess.run(
a__ , feed_dict={mean_input: array(a__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase =sess.run(a__ )
_lowerCAmelCase =sess.run(a__ )
return centroids, assignments
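# Note: the graph above uses legacy TF1-era APIs (tf.Session, tf.placeholder,
# tf.initialize_all_variables, and tf.sub, which became tf.subtract in TF 1.0),
# so it needs tf.compat.v1-style adjustments to run on modern TensorFlow.
# Illustrative call (a sketch, with `kmeans` as a hypothetical name for the
# function above): centroids, assignments = kmeans(vectors, 3)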
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
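# Concretely: y = weight * x / sqrt(mean(x**2) + eps). E.g. for x = [3., 4.],
# mean(x**2) = 12.5 and x * rsqrt(12.5) ≈ [0.849, 1.131] before scaling.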
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
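# This is the tanh approximation of GELU popularized by GPT-2:
# gelu(x) ≈ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).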
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
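# FiLM: the conditioning embedding is projected to 2 * out_features values,
# split into a per-feature (scale, shift) pair, and applied as x * (1 + scale) + shift.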
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
| 58
| 0
|
'''simple docstring'''
import pprint
import requests
lowercase_ = '''https://zenquotes.io/api'''
def UpperCamelCase__ ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def UpperCamelCase__ ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
lowercase_ = random_quotes()
pprint.pprint(response)
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to save the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 58
| 0
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__A , 'num_attention_heads' ) )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=64 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 6, 8] , __A=[2, 3, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.02 , __A=True , __A=True , __A=2 , ) -> Optional[Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =kernel_size
_lowerCAmelCase =stride
_lowerCAmelCase =padding
_lowerCAmelCase =hidden_sizes
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =depths
_lowerCAmelCase =key_dim
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =patch_size
_lowerCAmelCase =attention_ratio
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =initializer_range
_lowerCAmelCase =[
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =num_labels
_lowerCAmelCase =initializer_range
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> List[str]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =LevitModel(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A )
_lowerCAmelCase =(self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase =image_size[0], image_size[1]
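# With the default kernel_size=3, stride=2, padding=1, each of the 4 conv
# stages roughly halves the spatial size: out = floor((in + 2*pad - k) / s) + 1.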
for _ in range(4 ):
_lowerCAmelCase =floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
_lowerCAmelCase =floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCamelCase__ ( self , __A , __A , __A ) -> int:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =LevitForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase : Optional[Any] = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase : Union[str, Any] = False
lowercase : List[Any] = False
lowercase : str = False
lowercase : int = False
lowercase : Dict = False
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =LevitModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ) -> Optional[int]:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCamelCase__ ( self ) -> List[str]:
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCamelCase__ ( self ) -> Any:
pass
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__A )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , __A )
def UpperCamelCase__ ( self ) -> str:
def check_hidden_states_output(__A , __A , __A ):
_lowerCAmelCase =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCAmelCase =model(**self._prepare_for_class(__A , __A ) )
_lowerCAmelCase =outputs.hidden_states
_lowerCAmelCase =len(self.model_tester.depths ) + 1
self.assertEqual(len(__A ) , __A )
_lowerCAmelCase =(self.model_tester.image_size, self.model_tester.image_size)
_lowerCAmelCase , _lowerCAmelCase =image_size[0], image_size[1]
for _ in range(4 ):
_lowerCAmelCase =floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
_lowerCAmelCase =floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(__A , __A , __A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ) -> str:
pass
def UpperCamelCase__ ( self , __A , __A , __A=False ) -> Any:
_lowerCAmelCase =super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def UpperCamelCase__ ( self ) -> str:
if not self.model_tester.is_training:
return
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_lowerCAmelCase =model_class(__A )
model.to(__A )
model.train()
_lowerCAmelCase =self._prepare_for_class(__A , __A , return_labels=__A )
_lowerCAmelCase =model(**__A ).loss
loss.backward()
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCAmelCase =False
_lowerCAmelCase =True
for model_class in self.all_model_classes:
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_lowerCAmelCase =model_class(__A )
model.gradient_checkpointing_enable()
model.to(__A )
model.train()
_lowerCAmelCase =self._prepare_for_class(__A , __A , return_labels=__A )
_lowerCAmelCase =model(**__A ).loss
loss.backward()
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =[
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
_lowerCAmelCase =problem_type['title']
_lowerCAmelCase =problem_type['num_labels']
_lowerCAmelCase =model_class(__A )
model.to(__A )
model.train()
_lowerCAmelCase =self._prepare_for_class(__A , __A , return_labels=__A )
if problem_type["num_labels"] > 1:
_lowerCAmelCase =inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
_lowerCAmelCase =inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__A ) as warning_list:
_lowerCAmelCase =model(**__A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ) -> List[Any]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =LevitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ) -> Any:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__A , return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**__A )
# verify the logits
_lowerCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
_lowerCAmelCase =torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCamelCase__ ( a__ , a__=0.999 , a__="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(a__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_lowerCAmelCase =[]
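# Discretize the continuous alpha_bar curve: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# clipped at max_beta (0.999 by default) for numerical stability.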
for i in range(a__ ):
_lowerCAmelCase =i / num_diffusion_timesteps
_lowerCAmelCase =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a__ ) / alpha_bar_fn(a__ ) , a__ ) )
return torch.tensor(a__ , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
lowercase : List[str] = [e.name for e in KarrasDiffusionSchedulers]
lowercase : Any = 2
@register_to_config
def __init__( self , __A = 1000 , __A = 0.00_085 , __A = 0.012 , __A = "linear" , __A = None , __A = "epsilon" , __A = "linspace" , __A = 0 , ) -> str:
if trained_betas is not None:
_lowerCAmelCase =torch.tensor(__A , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase =torch.linspace(__A , __A , __A , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase =(
torch.linspace(beta_start**0.5 , beta_end**0.5 , __A , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase =betas_for_alpha_bar(__A )
else:
raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_lowerCAmelCase =1.0 - self.betas
_lowerCAmelCase =torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__A , __A , __A )
def UpperCamelCase__ ( self , __A , __A=None ) -> Optional[Any]:
if schedule_timesteps is None:
_lowerCAmelCase =self.timesteps
_lowerCAmelCase =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowerCAmelCase =1 if len(__A ) > 1 else 0
else:
_lowerCAmelCase =timestep.cpu().item() if torch.is_tensor(__A ) else timestep
_lowerCAmelCase =self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase__ ( self ) -> List[str]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase__ ( self , __A , __A , ) -> torch.FloatTensor:
_lowerCAmelCase =self.index_for_timestep(__A )
if self.state_in_first_order:
_lowerCAmelCase =self.sigmas[step_index]
else:
_lowerCAmelCase =self.sigmas_interpol[step_index]
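# Scale by 1 / sqrt(sigma^2 + 1) so the model receives inputs with roughly
# unit variance at every noise level.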
_lowerCAmelCase =sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase__ ( self , __A , __A = None , __A = None , ) -> List[Any]:
_lowerCAmelCase =num_inference_steps
_lowerCAmelCase =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowerCAmelCase =np.linspace(0 , num_train_timesteps - 1 , __A , dtype=__A )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowerCAmelCase =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
_lowerCAmelCase =(np.arange(0 , __A ) * step_ratio).round()[::-1].copy().astype(__A )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowerCAmelCase =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
_lowerCAmelCase =(np.arange(__A , 0 , -step_ratio )).round().copy().astype(__A )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
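# Convert the cumulative alphas to k-diffusion sigmas: sigma = sqrt((1 - alpha_bar) / alpha_bar).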
_lowerCAmelCase =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowerCAmelCase =torch.from_numpy(np.log(__A ) ).to(__A )
_lowerCAmelCase =np.interp(__A , np.arange(0 , len(__A ) ) , __A )
_lowerCAmelCase =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowerCAmelCase =torch.from_numpy(__A ).to(device=__A )
# interpolate sigmas
_lowerCAmelCase =sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
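# lerp of logs then exp == geometric mean of each sigma and its predecessor,
# which keeps the interpolated midpoints positive and log-evenly spaced.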
_lowerCAmelCase =torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_lowerCAmelCase =torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(__A ).startswith('mps' ):
# mps does not support float64
_lowerCAmelCase =torch.from_numpy(__A ).to(__A , dtype=torch.floataa )
else:
_lowerCAmelCase =torch.from_numpy(__A ).to(__A )
# interpolate timesteps
_lowerCAmelCase =self.sigma_to_t(__A ).to(__A , dtype=timesteps.dtype )
_lowerCAmelCase =torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_lowerCAmelCase =torch.cat([timesteps[:1], interleaved_timesteps] )
_lowerCAmelCase =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowerCAmelCase =defaultdict(__A )
def UpperCamelCase__ ( self , __A ) -> int:
# get log sigma
_lowerCAmelCase =sigma.log()
# get distribution
_lowerCAmelCase =log_sigma - self.log_sigmas[:, None]
# get sigmas range
_lowerCAmelCase =dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_lowerCAmelCase =low_idx + 1
_lowerCAmelCase =self.log_sigmas[low_idx]
_lowerCAmelCase =self.log_sigmas[high_idx]
# interpolate sigmas
_lowerCAmelCase =(low - log_sigma) / (low - high)
_lowerCAmelCase =w.clamp(0 , 1 )
# transform interpolation to time range
_lowerCAmelCase =(1 - w) * low_idx + w * high_idx
_lowerCAmelCase =t.view(sigma.shape )
return t
@property
def UpperCamelCase__ ( self ) -> Tuple:
return self.sample is None
def UpperCamelCase__ ( self , __A , __A , __A , __A = True , ) -> Union[SchedulerOutput, Tuple]:
_lowerCAmelCase =self.index_for_timestep(__A )
# advance index counter by 1
_lowerCAmelCase =timestep.cpu().item() if torch.is_tensor(__A ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowerCAmelCase =self.sigmas[step_index]
_lowerCAmelCase =self.sigmas_interpol[step_index + 1]
_lowerCAmelCase =self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_lowerCAmelCase =self.sigmas[step_index - 1]
_lowerCAmelCase =self.sigmas_interpol[step_index]
_lowerCAmelCase =self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowerCAmelCase =0
_lowerCAmelCase =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowerCAmelCase =sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCAmelCase =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase =sigma_hat if self.state_in_first_order else sigma_interpol
_lowerCAmelCase =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowerCAmelCase =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowerCAmelCase =sigma_interpol - sigma_hat
# store for 2nd order step
_lowerCAmelCase =sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_lowerCAmelCase =(sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_lowerCAmelCase =sigma_next - sigma_hat
_lowerCAmelCase =self.sample
_lowerCAmelCase =None
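# Explicit Euler step in sigma space: prev_sample = sample + derivative * dt.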
_lowerCAmelCase =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__A )
def UpperCamelCase__ ( self , __A , __A , __A , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowerCAmelCase =self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__A ):
# mps does not support float64
_lowerCAmelCase =self.timesteps.to(original_samples.device , dtype=torch.floataa )
_lowerCAmelCase =timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_lowerCAmelCase =self.timesteps.to(original_samples.device )
_lowerCAmelCase =timesteps.to(original_samples.device )
_lowerCAmelCase =[self.index_for_timestep(__A , __A ) for t in timesteps]
_lowerCAmelCase =sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowerCAmelCase =sigma.unsqueeze(-1 )
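# Noising in the k-diffusion parameterization: x_t = x_0 + sigma_t * noise.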
_lowerCAmelCase =original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # feeds into an nn.Sequential with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-TensorFlow the experts are stored in one array, so it is split per expert
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
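# The fused checkpoint stores q, k and v stacked along axis 1; slice them
# apart, then flatten each to a 2-D [out, in] weight via reshape + transpose.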
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58
| 0
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# guard against the model hanging on generate (e.g. because a bad config was saved)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # guards against the model hanging on generate (e.g. a bad saved config)
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 711
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
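# Strip decimal digits off n one at a time, accumulating their sum in r.
# e.g. power=15: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.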
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = ['pixel_values']
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = None , __A = True , __A = 1 / 255 , __A = True , __A = IMAGENET_DEFAULT_MEAN , __A = IMAGENET_DEFAULT_STD , **__A , ) -> None:
super().__init__(**__A )
_lowerCAmelCase =size if size is not None else {'shortest_edge': 224}
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
_lowerCAmelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase =get_size_dict(__A , param_name='crop_size' )
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =resample
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =crop_size
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowerCAmelCase =int((256 / 224) * size['shortest_edge'] )
_lowerCAmelCase =get_resize_output_image_size(__A , size=__A , default_to_square=__A )
_lowerCAmelCase ={'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
__A , size=(size_dict['height'], size_dict['width']) , resample=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(__A , size=(size['height'], size['width']) , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> np.ndarray:
return rescale(__A , scale=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> BatchFeature:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase =image_std if image_std is not None else self.image_std
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
_lowerCAmelCase =crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase =get_size_dict(__A , param_name='crop_size' )
_lowerCAmelCase =make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(__A ) for image in images]
if do_resize:
_lowerCAmelCase =[self.resize(__A , __A , __A ) for image in images]
if do_center_crop:
_lowerCAmelCase =[self.center_crop(__A , __A ) for image in images]
if do_rescale:
_lowerCAmelCase =[self.rescale(__A , __A ) for image in images]
if do_normalize:
_lowerCAmelCase =[self.normalize(__A , __A , __A ) for image in images]
_lowerCAmelCase =[to_channel_dimension_format(__A , __A ) for image in images]
_lowerCAmelCase ={'pixel_values': images}
return BatchFeature(data=__A , tensor_type=__A )
| 712
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
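# Helper: DFS that reports a cycle when it reaches a vertex already on the
# current recursion stack (i.e. a back edge).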
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowercase_ = TypeVar('''KT''')
lowercase_ = TypeVar('''VT''')
class SCREAMING_SNAKE_CASE ( Generic[KT, VT]):
"""simple docstring"""
def __init__( self , __A = "root" , __A = None ) -> Dict:
_lowerCAmelCase =key
_lowerCAmelCase =value
_lowerCAmelCase =[]
def __repr__( self ) -> str:
return F'''Node({self.key}: {self.value})'''
@property
def UpperCamelCase__ ( self ) -> int:
return len(self.forward )
class SCREAMING_SNAKE_CASE ( Generic[KT, VT]):
"""simple docstring"""
def __init__( self , __A = 0.5 , __A = 16 ) -> int:
_lowerCAmelCase =Node[KT, VT]()
_lowerCAmelCase =0
_lowerCAmelCase =p
_lowerCAmelCase =max_level
def __str__( self ) -> str:
_lowerCAmelCase =list(self )
if len(__A ) == 0:
return F'''SkipList(level={self.level})'''
_lowerCAmelCase =max((len(str(__A ) ) for item in items) , default=4 )
_lowerCAmelCase =max(__A , 4 ) + 4
_lowerCAmelCase =self.head
_lowerCAmelCase =[]
_lowerCAmelCase =node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(__A , '-' ) + '* ' * len(__A ) )
lines.append(' ' * label_size + '| ' * len(__A ) )
while len(node.forward ) != 0:
_lowerCAmelCase =node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(__A , '-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(__A ) )
_lowerCAmelCase =node.forward
lines.append('None'.ljust(__A ) + '* ' * len(__A ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(__A )
def __iter__( self ) -> int:
_lowerCAmelCase =self.head
while len(node.forward ) != 0:
yield node.forward[0].key
_lowerCAmelCase =node.forward[0]
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =1
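# Coin-flip geometry: each additional level is kept with probability p,
# so the expected node height is 1 / (1 - p).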
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCamelCase__ ( self , __A ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
_lowerCAmelCase =[]
_lowerCAmelCase =self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
_lowerCAmelCase =node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__A )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCamelCase__ ( self , __A ) -> int:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(__A )
if node is not None:
for i, update_node in enumerate(__A ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_lowerCAmelCase =node.forward[i]
else:
_lowerCAmelCase =update_node.forward[:i]
def UpperCamelCase__ ( self , __A , __A ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(__A )
if node is not None:
_lowerCAmelCase =value
else:
_lowerCAmelCase =self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __A ):
update_vector.append(self.head )
_lowerCAmelCase =level
_lowerCAmelCase =Node(__A , __A )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__A )
else:
_lowerCAmelCase =new_node
def UpperCamelCase__ ( self , __A ) -> VT | None:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(__A )
if node is not None:
return node.value
return None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 1_2 )
skip_list.insert('Key3' , 4_1 )
skip_list.insert('Key4' , -1_9 )
_lowerCAmelCase =skip_list.head
_lowerCAmelCase ={}
while node.level != 0:
_lowerCAmelCase =node.forward[0]
_lowerCAmelCase =node.value
assert len(a__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_0 )
skip_list.insert('Key1' , 1_2 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 1_0 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 1_0 )
_lowerCAmelCase =skip_list.head
_lowerCAmelCase ={}
while node.level != 0:
_lowerCAmelCase =node.forward[0]
_lowerCAmelCase =node.value
if len(a__ ) != 4:
print()
assert len(a__ ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
assert skip_list.find('Some key' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key2' , 2_0 )
assert skip_list.find('Key2' ) == 2_0
skip_list.insert('Some Key' , 1_0 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 1_3 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 1_0
assert skip_list.find('V' ) == 1_3
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 1_4
assert skip_list.find('Key1' ) == 1_2
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 1_2
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4_2 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('X' )
def traverse_keys(a__ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(a__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def UpperCamelCase__ ( ):
'''simple docstring'''
def is_sorted(a__ ):
return all(next_item >= item for item, next_item in zip(a__ , lst[1:] ) )
_lowerCAmelCase =SkipList()
for i in range(1_0 ):
skip_list.insert(a__ , a__ )
assert is_sorted(list(a__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(a__ ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(a__ ) )
def UpperCamelCase__ ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
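# Illustrative sketch (not part of the test suite above): the level-descending
# search that the skip list's locate method performs, shown on a plain
# list-of-lists structure. The names here (`levels`, `locate`) are hypothetical
# and chosen for readability; the class above stores forward pointers on Node
# objects instead.
def locate(levels, key):
    """levels[i] is the sorted list of keys present at level i (0 = densest)."""
    update_path = []
    for i in reversed(range(len(levels))):
        pos = 0
        # Walk right while the next key is still smaller than the target,
        # then drop one level; remember where each level's walk stopped.
        while pos < len(levels[i]) and levels[i][pos] < key:
            pos += 1
        update_path.append((i, pos))
    return key in levels[0], update_path

# locate([[1, 3, 7, 9], [3, 9]], 7) -> (True, [(1, 1), (0, 2)])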
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
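# Sketch of the nested-config serialization pattern used by `to_dict` above,
# with hypothetical stand-in names (`VisionCfg`, `ComposedCfg`) rather than the
# real PretrainedConfig machinery; it assumes each sub-config exposes its own
# `to_dict()`.
import copy

class VisionCfg:
    def __init__(self, hidden_size=1408):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class ComposedCfg:
    model_type = "composed"

    def __init__(self, vision_config=None):
        self.vision_config = vision_config if vision_config is not None else VisionCfg()

    def to_dict(self):
        # Deep-copy own attributes, serialize sub-configs recursively, and
        # record the parent model_type - the same shape `to_dict` produces above.
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

# ComposedCfg().to_dict() -> {'vision_config': {'hidden_size': 1408}, 'model_type': 'composed'}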
| 58
| 0
|
'''simple docstring'''
import argparse
import json
import subprocess
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =[]
_lowerCAmelCase =(
F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
_lowerCAmelCase =subprocess.run(a__ , shell=a__ , stdout=subprocess.PIPE )
_lowerCAmelCase =output.stdout.decode('utf-8' )
_lowerCAmelCase =json.loads(a__ )
_lowerCAmelCase =status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(a__ )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(a__ ) )
if len(a__ ) > 0:
_lowerCAmelCase ='\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return values.split(',' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 714
|
'''simple docstring'''
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
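# Optional hardening sketch: `encrypt`/`decrypt` above raise KeyError on
# characters missing from the table (e.g. '%'). A tolerant variant, assuming
# unknown characters should be silently skipped rather than fail:
def encrypt_lenient(message: str) -> str:
    return " ".join(
        MORSE_CODE_DICT[char] for char in message.upper() if char in MORSE_CODE_DICT
    )

# encrypt_lenient("100%") -> ".---- ----- -----"  (the '%' is dropped)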
| 58
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 768 , ) -> Tuple:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.zeros(1 , __A ) )
_lowerCAmelCase =nn.Parameter(torch.ones(1 , __A ) )
def UpperCamelCase__ ( self , __A = None , __A = None , ) -> int:
_lowerCAmelCase =nn.Parameter(self.mean.to(__A ).to(__A ) )
_lowerCAmelCase =nn.Parameter(self.std.to(__A ).to(__A ) )
return self
def UpperCamelCase__ ( self , __A ) -> str:
_lowerCAmelCase =(embeds - self.mean) * 1.0 / self.std
return embeds
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =(embeds * self.std) + self.mean
return embeds
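# Numerical sanity sketch for the scale/unscale pair above: (x - mean) / std
# followed by x * std + mean is the identity up to float error. Standalone,
# with plain tensors instead of the registered nn.Parameter buffers.
def _check_scale_roundtrip() -> None:
    mean, std = torch.zeros(1, 4), torch.full((1, 4), 2.0)
    embeds = torch.randn(3, 4)
    scaled = (embeds - mean) * 1.0 / std
    assert torch.allclose(scaled * std + mean, embeds, atol=1e-6)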
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 0
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# text-to-image
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
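# Sketch of the measurement pattern the helper above enables: reset the CUDA
# peak-memory counters, run a workload, then read back the high-water mark.
# Guarded so it is a no-op on CPU-only machines; `fn` is any callable workload.
def peak_cuda_memory(fn) -> int:
    """Run `fn` and return peak CUDA memory in bytes (0 without CUDA)."""
    if not torch.cuda.is_available():
        fn()
        return 0
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()

# e.g. assert peak_cuda_memory(lambda: pipe_1(...)) < 13 * 10**9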
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
assert isinstance(a__ , a__ ), F'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
_lowerCAmelCase =F'''The input value of [n={number}] has to be > 0'''
raise ValueError(a__ )
else:
_lowerCAmelCase =sylvester(number - 1 )
_lowerCAmelCase =num - 1
_lowerCAmelCase =num
return lower * upper + 1
if __name__ == "__main__":
print(f'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
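# Equivalent iterative sketch (avoids recursion depth for large n). It uses the
# same recurrence the recursive version computes via lower * upper + 1, namely
# a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1:
def sylvester_iterative(number: int) -> int:
    if number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    num = 2
    for _ in range(number - 1):
        num = num * num - num + 1
    return num

# sylvester_iterative(8) == sylvester(8)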
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
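# The tests above assume a classic recursive 0/1 knapsack with the signature
# knapsack(capacity, weights, values, counter). A minimal sketch of that
# function (hypothetical stand-in for the imported `knapsack` module):
def knapsack(capacity: int, weights: list, values: list, counter: int) -> int:
    # Base case: no items left or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # The current item does not fit: skip it.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise return the better of taking or skipping the item.
    return max(
        values[counter - 1]
        + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )

# knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220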
| 58
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ = 16
lowercase_ = 32
def UpperCamelCase__ ( a__ , a__ = 1_6 ):
'''simple docstring'''
_lowerCAmelCase =AutoTokenizer.from_pretrained('bert-base-cased' )
_lowerCAmelCase =load_dataset('glue' , 'mrpc' )
def tokenize_function(a__ ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a__ , max_length=a__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase =datasets.map(
a__ , batched=a__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase =1_6
elif accelerator.mixed_precision != "no":
_lowerCAmelCase =8
else:
_lowerCAmelCase =None
return tokenizer.pad(
a__ , padding='longest' , max_length=a__ , pad_to_multiple_of=a__ , return_tensors='pt' , )
# Instantiate dataloaders.
_lowerCAmelCase =DataLoader(
tokenized_datasets['train'] , shuffle=a__ , collate_fn=a__ , batch_size=a__ )
_lowerCAmelCase =DataLoader(
tokenized_datasets['validation'] , shuffle=a__ , collate_fn=a__ , batch_size=a__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ = mocked_dataloaders # noqa: F811
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , a__ ) == "1":
_lowerCAmelCase =2
# Initialize accelerator
_lowerCAmelCase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase =config['lr']
_lowerCAmelCase =int(config['num_epochs'] )
_lowerCAmelCase =int(config['seed'] )
_lowerCAmelCase =int(config['batch_size'] )
_lowerCAmelCase =evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_lowerCAmelCase =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowerCAmelCase =batch_size // MAX_GPU_BATCH_SIZE
_lowerCAmelCase =MAX_GPU_BATCH_SIZE
set_seed(a__ )
_lowerCAmelCase , _lowerCAmelCase =get_dataloaders(a__ , a__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase =model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase =AdamW(params=model.parameters() , lr=a__ )
# Instantiate scheduler
_lowerCAmelCase =get_linear_schedule_with_warmup(
optimizer=a__ , num_warmup_steps=1_0_0 , num_training_steps=(len(a__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare(
a__ , a__ , a__ , a__ , a__ )
# Now we train the model
for epoch in range(a__ ):
model.train()
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCAmelCase =model(**a__ )
_lowerCAmelCase =outputs.loss
_lowerCAmelCase =loss / gradient_accumulation_steps
accelerator.backward(a__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_lowerCAmelCase =0
for step, batch in enumerate(a__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase =model(**a__ )
_lowerCAmelCase =outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase =accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(a__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_lowerCAmelCase =predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCAmelCase =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=a__ , references=a__ , )
_lowerCAmelCase =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , a__ )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a__ , default=a__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase ={'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(a__ , a__ )
if __name__ == "__main__":
main()
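# As the eval-loop comment notes, the manual last-batch truncation can be
# replaced by `Accelerator.gather_for_metrics`, which drops the duplicated
# padding samples itself. Minimal sketch, assuming `accelerator`, `model`,
# `eval_dataloader` and `metric` are prepared as in `training_function` above:
def evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # No manual `samples_seen` bookkeeping is needed here.
        predictions, references = accelerator.gather_for_metrics(
            (predictions, batch["labels"])
        )
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()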
| 718
|
'''simple docstring'''
lowercase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
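# Sketch of the lazy-import pattern this file relies on: `_LazyModule` stands in
# for the package in `sys.modules` and resolves names from `_import_structure`
# only on first attribute access. A minimal hand-rolled equivalent, using only
# the standard library (hypothetical, not the actual `_LazyModule` internals):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(attr)
        # Import the owning submodule lazily, then pull the symbol from it.
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(module, attr)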
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"]  # fails if the model hangs on generate, e.g. a bad config was saved
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = (DPMSolverSDEScheduler,)
lowercase : Dict = 10
def UpperCamelCase__ ( self , **__A ) -> Union[str, Any]:
_lowerCAmelCase ={
'num_train_timesteps': 1100,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**__A )
return config
def UpperCamelCase__ ( self ) -> Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def UpperCamelCase__ ( self ) -> Optional[Any]:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__A , beta_end=__A )
def UpperCamelCase__ ( self ) -> Any:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__A )
def UpperCamelCase__ ( self ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config()
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase =sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase =sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config()
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps , device=__A )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter.to(__A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config()
_lowerCAmelCase =scheduler_class(**__A , use_karras_sigmas=__A )
scheduler.set_timesteps(self.num_inference_steps , device=__A )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter.to(__A ) * scheduler.init_noise_sigma
_lowerCAmelCase =sample.to(__A )
for t in scheduler.timesteps:
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
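# The three loops above all follow the generic diffusers sampling skeleton:
# scale the sample for the current timestep, predict with the model, then step
# the scheduler. A sketch of that skeleton, assuming `scheduler` and `model`
# are built as in the tests above:
def run_sampling_loop(scheduler, model, initial_sample, num_inference_steps=10):
    scheduler.set_timesteps(num_inference_steps)
    sample = initial_sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = model(model_input, t)
        # `step` returns a SchedulerOutput; `prev_sample` is the denoised sample.
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample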
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
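# Sketch of the cache-or-build pattern `prepare_data` uses above: compute the
# expensive features once, persist them with `torch.save`, and reload on later
# runs unless an overwrite flag is set. Hypothetical standalone helper:
def load_or_build_features(cache_path, build_fn, overwrite=False):
    if os.path.exists(cache_path) and not overwrite:
        logger.info("Loading features from cached file %s", cache_path)
        return torch.load(cache_path)
    features = build_fn()
    logger.info("Saving features into cached file %s", cache_path)
    torch.save(features, cache_path)
    return features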
| 58
| 0
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return (data["data"], data["target"])
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(a__ , a__ )
# Predict target for test data
_lowerCAmelCase =xgb.predict(a__ )
_lowerCAmelCase =predictions.reshape(len(a__ ) , 1 )
return predictions
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =fetch_california_housing()
_lowerCAmelCase , _lowerCAmelCase =data_handling(a__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =train_test_split(
a__ , a__ , test_size=0.25 , random_state=1 )
_lowerCAmelCase =xgboost(a__ , a__ , a__ )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(a__ , a__ )}''' )
print(F'''Mean Square Error : {mean_squared_error(a__ , a__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
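# De-obfuscated sketch (added): the masked method names above collide, so here
# is a compact, self-contained version of the same Boruvka MST computation with
# descriptive names. It assumes a connected graph with distinct edge weights.
def boruvka_sketch(num_nodes: int, edges: list) -> int:
    parent = list(range(num_nodes))

    def find(u: int) -> int:
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving
            u = parent[u]
        return u

    mst_weight, components = 0, num_nodes
    while components > 1:
        cheapest = [-1] * num_nodes  # cheapest outgoing edge index per component
        for idx, (u, v, w) in enumerate(edges):
            ru, rv = find(u), find(v)
            if ru != rv:
                for root in (ru, rv):
                    if cheapest[root] == -1 or edges[cheapest[root]][2] > w:
                        cheapest[root] = idx
        for idx in set(cheapest):
            if idx != -1:
                u, v, w = edges[idx]
                if find(u) != find(v):
                    parent[find(u)] = find(v)  # union the two components
                    mst_weight += w
                    components -= 1
    return mst_weight

# boruvka_sketch(4, [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]) == 19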
| 58
| 0
|
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_lowerCAmelCase =Vector()
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__A ) , '(0,0,0,0,0,1)' )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3, 4] )
self.assertEqual(len(__A ) , 4 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2] )
_lowerCAmelCase =Vector([1, 2, 3, 4, 5] )
_lowerCAmelCase =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_lowerCAmelCase =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
_lowerCAmelCase =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
_lowerCAmelCase =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
_lowerCAmelCase =Vector([2, -1, 4] ) # for test of dot product
_lowerCAmelCase =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def UpperCamelCase__ ( self ) -> None:
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def UpperCamelCase__ ( self ) -> None:
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
_lowerCAmelCase =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __A , __A ) ) , '(3,4,7)' )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 0, 0, 0, 0, 0] )
_lowerCAmelCase =x.copy()
self.assertEqual(str(__A ) , str(__A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__A ) , '(0,1,0)' )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(__A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__A , __A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__A , __A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_lowerCAmelCase =Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(__A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def UpperCamelCase__ ( self ) -> None:
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
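# Reference sketch (added): the `lib` module under test is not shown here, but
# the axpy operation it exercises is the classic BLAS "a*x plus y" update; a
# minimal list-based equivalent would be:
def axpy_sketch(a: float, x: list, y: list) -> list:
    assert len(x) == len(y), "vectors must have the same length"
    return [a * xi + yi for xi, yi in zip(x, y)]

# axpy_sketch(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7], matching the unit test above.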
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
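# Note (added): Image.point applies `brightness` to every 8-bit channel value c,
# and 1_2_8 + level + (c - 1_2_8) simplifies to c + level, so this is a plain
# additive brightness shift; PIL clamps the result to [0, 255] (e.g. with
# level=100 a pixel value of 200 maps to 255).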
| 58
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = ['pixel_values']
def __init__( self , __A = True , __A = 32 , __A=PILImageResampling.BILINEAR , __A = True , **__A , ) -> None:
_lowerCAmelCase =do_resize
_lowerCAmelCase =do_rescale
_lowerCAmelCase =size_divisor
_lowerCAmelCase =resample
super().__init__(**__A )
def UpperCamelCase__ ( self , __A , __A , __A , __A = None , **__A ) -> np.ndarray:
_lowerCAmelCase , _lowerCAmelCase =get_image_size(__A )
# Rounds the height and width down to the closest multiple of size_divisor
_lowerCAmelCase =height // size_divisor * size_divisor
_lowerCAmelCase =width // size_divisor * size_divisor
_lowerCAmelCase =resize(__A , (new_h, new_w) , resample=__A , data_format=__A , **__A )
return image
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A ) -> np.ndarray:
return rescale(image=__A , scale=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A=None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> BatchFeature:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =size_divisor if size_divisor is not None else self.size_divisor
_lowerCAmelCase =resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
_lowerCAmelCase =make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(__A ) for img in images]
if do_resize:
_lowerCAmelCase =[self.resize(__A , size_divisor=__A , resample=__A ) for image in images]
if do_rescale:
_lowerCAmelCase =[self.rescale(__A , scale=1 / 255 ) for image in images]
_lowerCAmelCase =[to_channel_dimension_format(__A , __A ) for image in images]
_lowerCAmelCase ={'pixel_values': images}
return BatchFeature(data=__A , tensor_type=__A )
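# Worked example (added): the resize step rounds each spatial dimension down to
# the nearest multiple of size_divisor before interpolating, e.g.:
#
#   size_divisor = 32
#   height, width = 517, 390
#   new_h = height // size_divisor * size_divisor  # 512
#   new_w = width // size_divisor * size_divisor   # 384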
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
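# Worked example (added): for water at roughly 20 °C, with bulk modulus
# ~2.15e9 Pa and density ~998 kg/m^3, the formula gives
# (2.15e9 / 998) ** 0.5 ≈ 1468 m/s, close to the measured ~1482 m/s.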
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each bucket's contents back into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to the next digit
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
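# De-obfuscated sketch (added): several references in the masked version above
# are ambiguous (the bucket count should be RADIX, and each element i is what
# gets appended), so here is the same LSD radix sort with descriptive names:
RADIX_SKETCH = 10

def radix_sort_sketch(list_of_ints: list) -> list:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        buckets = [[] for _ in range(RADIX_SKETCH)]  # one bucket per digit
        for i in list_of_ints:
            buckets[int((i / placement) % RADIX_SKETCH)].append(i)
        a = 0
        for b in range(RADIX_SKETCH):  # rebuild the list, bucket by bucket
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        placement *= RADIX_SKETCH  # move to the next digit
    return list_of_ints

# radix_sort_sketch([170, 45, 75, 90, 2, 802, 24, 66])
# -> [2, 24, 45, 66, 75, 90, 170, 802]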
| 58
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> int:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__A ):
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
_lowerCAmelCase =FlaxAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__A ):
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
_lowerCAmelCase =FlaxAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def UpperCamelCase__ ( self ) -> List[str]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__A )
_lowerCAmelCase =FlaxBertModel.from_pretrained(__A )
_lowerCAmelCase =tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__A ):
return model(**__A )
eval(**__A ).block_until_ready()
@slow
def UpperCamelCase__ ( self ) -> Any:
for model_name in ["roberta-base", "roberta-large"]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__A )
_lowerCAmelCase =FlaxRobertaModel.from_pretrained(__A )
_lowerCAmelCase =tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__A ):
return model(**__A )
eval(**__A ).block_until_ready()
def UpperCamelCase__ ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , 'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
__A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained(__A , revision='aaaaaa' )
def UpperCamelCase__ ( self ) -> List[str]:
with self.assertRaisesRegex(
__A , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(__A , 'Use `from_pt=True` to load this model' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
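# Minimal standalone illustration (added) of the jax.jit pattern used in the
# tests above: the first call triggers tracing/compilation, and
# block_until_ready() waits for the asynchronous dispatch to finish.
import jax
import jax.numpy as jnp

@jax.jit
def scaled_sum(x):
    return 2.0 * jnp.sum(x)

scaled_sum(jnp.arange(4.0)).block_until_ready()  # -> 12.0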
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
lowercase_ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Github(os.environ['GITHUB_TOKEN'] )
_lowerCAmelCase =g.get_repo('huggingface/transformers' )
_lowerCAmelCase =repo.get_issues(state='open' )
for issue in open_issues:
_lowerCAmelCase =sorted([comment for comment in issue.get_comments()] , key=lambda a__ : i.created_at , reverse=a__ )
_lowerCAmelCase =comments[0] if len(a__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
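# Timing summary (added): an open, non-exempt issue is closed once the bot's
# own comment is the latest activity and the issue has been idle for more than
# 7 days (and open for at least 30); otherwise a stale warning is posted after
# more than 23 idle days.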
| 704
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
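# Usage note (added): this divide-and-conquer peak finder assumes a "mountain"
# list that strictly increases and then strictly decreases (the recursive calls
# refer to the function's original name, `peak`); e.g. peak([1, 3, 5, 9, 7, 2])
# returns 9.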
| 58
| 0
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
lowercase_ = '''scheduler_config.json'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = 1
lowercase : Any = 2
lowercase : List[str] = 3
lowercase : Dict = 4
lowercase : str = 5
@dataclass
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : jnp.ndarray
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : Tuple = SCHEDULER_CONFIG_NAME
lowercase : List[Any] = ['dtype']
lowercase : str = []
lowercase : Union[str, Any] = True
@classmethod
def UpperCamelCase__ ( cls , __A = None , __A = None , __A=False , **__A , ) -> Optional[int]:
_lowerCAmelCase , _lowerCAmelCase =cls.load_config(
pretrained_model_name_or_path=__A , subfolder=__A , return_unused_kwargs=__A , **__A , )
_lowerCAmelCase , _lowerCAmelCase =cls.from_config(__A , return_unused_kwargs=__A , **__A )
if hasattr(__A , 'create_state' ) and getattr(__A , 'has_state' , __A ):
_lowerCAmelCase =scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCamelCase__ ( self , __A , __A = False , **__A ) -> Union[str, Any]:
self.save_config(save_directory=__A , push_to_hub=__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Any:
return self._get_compatibles()
@classmethod
def UpperCamelCase__ ( cls ) -> Tuple:
_lowerCAmelCase =list(set([cls.__name__] + cls._compatibles ) )
_lowerCAmelCase =importlib.import_module(__name__.split('.' )[0] )
_lowerCAmelCase =[
getattr(__A , __A ) for c in compatible_classes_str if hasattr(__A , __A )
]
return compatible_classes
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
assert len(a__ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(a__ ) - x.ndim) ) , a__ )
def UpperCamelCase__ ( a__ , a__=0.999 , a__=jnp.floataa ):
'''simple docstring'''
def alpha_bar(a__ ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
_lowerCAmelCase =[]
for i in range(a__ ):
_lowerCAmelCase =i / num_diffusion_timesteps
_lowerCAmelCase =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(a__ ) / alpha_bar(a__ ) , a__ ) )
return jnp.array(a__ , dtype=a__ )
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : jnp.ndarray
lowercase : jnp.ndarray
lowercase : jnp.ndarray
@classmethod
def UpperCamelCase__ ( cls , __A ) -> Dict:
_lowerCAmelCase =scheduler.config
if config.trained_betas is not None:
_lowerCAmelCase =jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
_lowerCAmelCase =jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase =(
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase =betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
_lowerCAmelCase =1.0 - betas
_lowerCAmelCase =jnp.cumprod(__A , axis=0 )
return cls(
alphas=__A , betas=__A , alphas_cumprod=__A , )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =state.alphas_cumprod
_lowerCAmelCase =alphas_cumprod[timesteps] ** 0.5
_lowerCAmelCase =sqrt_alpha_prod.flatten()
_lowerCAmelCase =broadcast_to_shape_from_left(a__ , original_samples.shape )
_lowerCAmelCase =(1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCAmelCase =sqrt_one_minus_alpha_prod.flatten()
_lowerCAmelCase =broadcast_to_shape_from_left(a__ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase =get_sqrt_alpha_prod(a__ , a__ , a__ , a__ )
_lowerCAmelCase =sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase =get_sqrt_alpha_prod(a__ , a__ , a__ , a__ )
_lowerCAmelCase =sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
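# Note (added): the last helper computes the "v-prediction" target
# v = sqrt(alpha_cumprod) * noise - sqrt(1 - alpha_cumprod) * sample, the
# velocity parameterization introduced in Salimans & Ho (2022), "Progressive
# Distillation for Fast Sampling of Diffusion Models".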
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
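# Note (added): build_inputs_with_special_tokens above produces the standard
# BERT-style layout: [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP]
# for a pair; the token type ids are 0 for the first segment (including the
# special tokens) and 1 for the second.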
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
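# Note (added): this is the tanh approximation of GELU,
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
# i.e. the "gelu_new" activation used by GPT-2 and T5.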
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
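# Note (added): the layer above implements feature-wise linear modulation
# (FiLM, Perez et al., 2018): the conditioning embedding is projected to a
# (scale, shift) pair and applied as x * (1 + scale) + shift.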
| 58
| 0
|
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> List[Any]:
raise NotImplementedError()
def UpperCamelCase__ ( self ) -> str:
raise NotImplementedError()
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A , __A = False , **__A ) -> Dict:
_lowerCAmelCase =tokenizer
_lowerCAmelCase =skip_prompt
_lowerCAmelCase =decode_kwargs
# variables used in the streaming process
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =True
def UpperCamelCase__ ( self , __A ) -> int:
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('TextStreamer only supports batch size 1' )
elif len(value.shape ) > 1:
_lowerCAmelCase =value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_lowerCAmelCase =False
return
# Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
_lowerCAmelCase =self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('\n' ):
_lowerCAmelCase =text[self.print_len :]
_lowerCAmelCase =[]
_lowerCAmelCase =0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_lowerCAmelCase =text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_lowerCAmelCase =text[self.print_len : text.rfind(' ' ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def UpperCamelCase__ ( self ) -> int:
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
_lowerCAmelCase =self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_lowerCAmelCase =text[self.print_len :]
_lowerCAmelCase =[]
_lowerCAmelCase =0
else:
_lowerCAmelCase =''
_lowerCAmelCase =True
self.on_finalized_text(__A , stream_end=__A )
def UpperCamelCase__ ( self , __A , __A = False ) -> Tuple:
print(__A , flush=__A , end='' if not stream_end else None )
def UpperCamelCase__ ( self , __A ) -> str:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A , __A = False , __A = None , **__A ) -> List[Any]:
super().__init__(__A , __A , **__A )
_lowerCAmelCase =Queue()
_lowerCAmelCase =None
_lowerCAmelCase =timeout
def UpperCamelCase__ ( self , __A , __A = False ) -> Any:
self.text_queue.put(__A , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> Optional[int]:
return self
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
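# Hedged usage sketch (added; assuming the original transformers class names,
# TextStreamer and TextIteratorStreamer): the queue-backed iterator variant is
# typically drained from the main thread while generation runs in another, e.g.
#
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=10.0)
#   Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer)).start()
#   for chunk in streamer:
#       print(chunk, end="")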
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
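# Length of the smallest repunit (1, 11, 111, ...) divisible by `divisor`;
# returns 0 when the divisor shares a factor with 10, since no repunit is then
# divisible. This is A(n) from Project Euler problem 129.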
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_lowerCAmelCase =1
_lowerCAmelCase =1
while repunit:
_lowerCAmelCase =(1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def UpperCamelCase__ ( a__ = 1_0_0_0_0_0_0 ):
'''simple docstring'''
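# Least number (coprime to 10) whose repunit length A(n) exceeds `limit`
# (Project Euler problem 129). Since A(n) <= n, the search can start just
# below the limit and step through odd candidates.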
_lowerCAmelCase =limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(a__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'{solution() = }')
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig):
"""simple docstring"""
lowercase : List[str] = 'perceiver'
def __init__( self , __A=256 , __A=1280 , __A=768 , __A=1 , __A=26 , __A=8 , __A=8 , __A=None , __A=None , __A="kv" , __A=1 , __A=1 , __A="gelu" , __A=0.1 , __A=0.02 , __A=1E-12 , __A=True , __A=262 , __A=2048 , __A=56 , __A=[368, 496] , __A=16 , __A=1920 , __A=16 , __A=[1, 16, 224, 224] , **__A , ) -> Union[str, Any]:
super().__init__(**__A )
_lowerCAmelCase =num_latents
_lowerCAmelCase =d_latents
_lowerCAmelCase =d_model
_lowerCAmelCase =num_blocks
_lowerCAmelCase =num_self_attends_per_block
_lowerCAmelCase =num_self_attention_heads
_lowerCAmelCase =num_cross_attention_heads
_lowerCAmelCase =qk_channels
_lowerCAmelCase =v_channels
_lowerCAmelCase =cross_attention_shape_for_attention
_lowerCAmelCase =self_attention_widening_factor
_lowerCAmelCase =cross_attention_widening_factor
_lowerCAmelCase =hidden_act
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =use_query_residual
# masked language modeling attributes
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
# image classification attributes
_lowerCAmelCase =image_size
# flow attributes
_lowerCAmelCase =train_size
# multimodal autoencoding attributes
_lowerCAmelCase =num_frames
_lowerCAmelCase =audio_samples_per_frame
_lowerCAmelCase =samples_per_patch
_lowerCAmelCase =output_shape
class SCREAMING_SNAKE_CASE ( OnnxConfig):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCamelCase__ ( self ) -> float:
return 1E-4
def UpperCamelCase__ ( self , __A , __A = -1 , __A = -1 , __A = -1 , __A = False , __A = None , __A = 3 , __A = 40 , __A = 40 , ) -> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__A , __A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase =compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase =preprocessor.num_special_tokens_to_add(__A )
_lowerCAmelCase =compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase =[' '.join(['a'] ) * seq_length] * batch_size
_lowerCAmelCase =dict(preprocessor(__A , return_tensors=__A ) )
_lowerCAmelCase =inputs.pop('input_ids' )
return inputs
elif isinstance(__A , __A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCAmelCase =compute_effective_axis_dimension(__A , fixed_dimension=OnnxConfig.default_fixed_batch )
_lowerCAmelCase =self._generate_dummy_images(__A , __A , __A , __A )
_lowerCAmelCase =dict(preprocessor(images=__A , return_tensors=__A ) )
_lowerCAmelCase =inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
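# Walks every variable in the TF checkpoint, maps its Mesh-TensorFlow name to
# the corresponding GPTSAN PyTorch parameter name, and transposes/reshapes
# kernels from TF's (in, out) layout to PyTorch's (out, in) before saving.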
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # feeds into an nn.Sequential with Tanh, so two entries at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58
| 0
|
'''simple docstring'''
lowercase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 711
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
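# Digit sum of 2**power (Project Euler problem 16): peel off the last digit
# with n % 10 and drop it with n // 10 until nothing is left.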
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pi
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
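# Inductive reactance follows X_L = 2 * pi * f * L. Exactly one of the three
# arguments must be 0; the function solves the formula for that quantity and
# returns it in a dict keyed by its name.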
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
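# Reports whether the directed graph (an adjacency mapping) contains a cycle:
# run a DFS from every unvisited node and look for a back edge into the
# current recursion stack.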
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
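# Returns True as soon as the DFS revisits a vertex that is still on the
# recursion stack (a back edge), i.e. a cycle has been found.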
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check verifies that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( PretrainedConfig):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( PretrainedConfig):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. Initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
| 58
| 0
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
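# Decorator that registers a single key on the decorated function's
# `handle_key` attribute so the KeyHandler metaclass can dispatch to it.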
def decorator(a__ ):
_lowerCAmelCase =getattr(a__ , 'handle_key' , [] )
handle += [key]
setattr(a__ , 'handle_key' , a__ )
return func
return decorator
def UpperCamelCase__ ( *a__ ):
'''simple docstring'''
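# Same idea as above, but registers several keys at once.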
def decorator(a__ ):
_lowerCAmelCase =getattr(a__ , 'handle_key' , [] )
handle += keys
setattr(a__ , 'handle_key' , a__ )
return func
return decorator
class SCREAMING_SNAKE_CASE ( type):
"""simple docstring"""
def __new__( cls , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =super().__new__(cls , __A , __A , __A )
if not hasattr(__A , 'key_handler' ):
setattr(__A , 'key_handler' , {} )
setattr(__A , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
_lowerCAmelCase =getattr(__A , 'handle_key' , [] )
for key in handled_keys:
_lowerCAmelCase =value
return new_cls
@staticmethod
def UpperCamelCase__ ( cls ) -> Tuple:
_lowerCAmelCase =get_character()
if char != KEYMAP["undefined"]:
_lowerCAmelCase =ord(__A )
_lowerCAmelCase =cls.key_handler.get(__A )
if handler:
_lowerCAmelCase =char
return handler(cls )
else:
return None
def UpperCamelCase__ ( cls ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 714
|
'''simple docstring'''
# fmt: off
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
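# Encode an uppercased message into Morse code, one space between symbols;
# spaces in the input map to '/' via the table above.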
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
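# Decode by splitting on spaces and looking each Morse token up in the
# reversed table.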
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ) -> int:
'''simple docstring'''
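# Divide-and-conquer peak finding on a list that rises then falls: inspect the
# middle three elements and recurse into the rising half, O(log n) overall.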
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( OnnxConfig):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowercase_ = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
if openai_config_file == "":
_lowerCAmelCase =OpenAIGPTConfig()
else:
_lowerCAmelCase =OpenAIGPTConfig.from_json_file(a__ )
_lowerCAmelCase =OpenAIGPTModel(a__ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(a__ , a__ , a__ )
# Save pytorch-model
_lowerCAmelCase =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
_lowerCAmelCase =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , a__ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(a__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
lowercase_ = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
| 58
| 0
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowercase_ = '''src/transformers'''
# Matches is_xxx_available()
lowercase_ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowercase_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowercase_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowercase_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase_ = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase_ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowercase_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowercase_ = re.compile(r'''^\s*try:''')
# Catches a line with else:
lowercase_ = re.compile(r'''^\s*else:''')
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
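# Returns the normalized backend name (e.g. "torch", or "flax_and_tf" for a
# combined check) declared on an `if not is_xxx_available()` line, or None if
# the line declares no backend.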
if _re_test_backend.search(a__ ) is None:
return None
_lowerCAmelCase =[b[0] for b in _re_backend.findall(a__ )]
backends.sort()
return "_and_".join(a__ )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
with open(a__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase =f.readlines()
_lowerCAmelCase =0
while line_index < len(a__ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(a__ ):
return None
# First grab the objects without a specific backend in _import_structure
_lowerCAmelCase =[]
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
_lowerCAmelCase =lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(a__ ):
_lowerCAmelCase =_re_one_line_import_struct.search(a__ ).groups()[0]
_lowerCAmelCase =re.findall(r'\[([^\]]+)\]' , a__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
_lowerCAmelCase =_re_import_struct_key_value.search(a__ )
if single_line_import_search is not None:
_lowerCAmelCase =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(a__ ) > 0]
objects.extend(a__ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
_lowerCAmelCase ={'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowerCAmelCase =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCAmelCase =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCAmelCase =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
_lowerCAmelCase =lines[line_index]
if _re_import_struct_add_one.search(a__ ) is not None:
objects.append(_re_import_struct_add_one.search(a__ ).groups()[0] )
elif _re_import_struct_add_many.search(a__ ) is not None:
_lowerCAmelCase =_re_import_struct_add_many.search(a__ ).groups()[0].split(', ' )
_lowerCAmelCase =[obj[1:-1] for obj in imports if len(a__ ) > 0]
objects.extend(a__ )
elif _re_between_brackets.search(a__ ) is not None:
_lowerCAmelCase =_re_between_brackets.search(a__ ).groups()[0].split(', ' )
_lowerCAmelCase =[obj[1:-1] for obj in imports if len(a__ ) > 0]
objects.extend(a__ )
elif _re_quote_object.search(a__ ) is not None:
objects.append(_re_quote_object.search(a__ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 1_2 + '"' ):
objects.append(line[1_3:-3] )
line_index += 1
_lowerCAmelCase =objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowerCAmelCase =[]
while (
line_index < len(a__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
_lowerCAmelCase =lines[line_index]
_lowerCAmelCase =_re_import.search(a__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowerCAmelCase ={'none': objects}
# Let's continue with backend-specific objects
while line_index < len(a__ ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowerCAmelCase =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCAmelCase =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCAmelCase =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
_lowerCAmelCase =lines[line_index]
_lowerCAmelCase =_re_import.search(a__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
_lowerCAmelCase =objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
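# Cross-checks that the `_import_structure` dict and the TYPE_CHECKING imports
# of an __init__.py expose exactly the same objects per backend, flagging
# duplicates and one-sided entries.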
def find_duplicates(a__ ):
return [k for k, v in collections.Counter(a__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_lowerCAmelCase =[]
for key in import_dict_objects.keys():
_lowerCAmelCase =find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_lowerCAmelCase =find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_lowerCAmelCase ='base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =[]
for root, _, files in os.walk(a__ ):
if "__init__.py" in files:
_lowerCAmelCase =os.path.join(a__ , '__init__.py' )
_lowerCAmelCase =parse_init(a__ )
if objects is not None:
_lowerCAmelCase =analyze_results(*a__ )
if len(a__ ) > 0:
_lowerCAmelCase =F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(a__ ) )
if len(a__ ) > 0:
raise ValueError('\n\n'.join(a__ ) )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =[]
for path, directories, files in os.walk(a__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(a__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(a__ ) / folder).glob('*.py' ) ) ) == 0:
continue
_lowerCAmelCase =str((Path(a__ ) / folder).relative_to(a__ ) )
_lowerCAmelCase =short_path.replace(os.path.sep , '.' )
submodules.append(a__ )
for fname in files:
if fname == "__init__.py":
continue
_lowerCAmelCase =str((Path(a__ ) / fname).relative_to(a__ ) )
_lowerCAmelCase =short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(a__ )
return submodules
lowercase_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def UpperCamelCase__ ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
_lowerCAmelCase =direct_transformers_import(a__ )
_lowerCAmelCase =set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we parse the init to collect all additions and
# (potentially re-)add them.
with open(os.path.join(a__ , '__init__.py' ) , 'r' ) as f:
_lowerCAmelCase =f.read()
import_structure_keys.update(set(re.findall(r'import_structure\[\"([^\"]*)\"\]' , a__ ) ) )
_lowerCAmelCase =[
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(a__ ) > 0:
_lowerCAmelCase ='\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
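# Illustrative sketch (an assumption, not part of the original script): the
# duplicate detection used by analyze_results above reduces to
# collections.Counter. The helper name below is hypothetical.
import collections

def _find_duplicates_demo(seq):
    # Keys that occur more than once, in first-seen order.
    return [k for k, v in collections.Counter(seq).items() if v > 1]

assert _find_duplicates_demo(["AutoModel", "AutoConfig", "AutoModel"]) == ["AutoModel"]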
| 718
|
'''simple docstring'''
lowercase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = ['pixel_values']
def __init__( self , __A = True , __A = None , __A = 0.9 , __A = PILImageResampling.BICUBIC , __A = True , __A = None , __A = 1 / 255 , __A = True , __A = True , __A = None , __A = None , **__A , ) -> None:
super().__init__(**__A )
_lowerCAmelCase =size if size is not None else {'shortest_edge': 224}
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
_lowerCAmelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase =get_size_dict(__A , param_name='crop_size' )
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =crop_pct
_lowerCAmelCase =resample
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =crop_size
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self , __A , __A , __A = None , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
_lowerCAmelCase =int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
_lowerCAmelCase =int(size['height'] / crop_pct )
else:
_lowerCAmelCase =(int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(__A ) )
_lowerCAmelCase =get_resize_output_image_size(__A , size=__A , default_to_square=__A )
else:
if "shortest_edge" in size:
_lowerCAmelCase =get_resize_output_image_size(__A , size=size['shortest_edge'] , default_to_square=__A )
elif "height" in size and "width" in size:
_lowerCAmelCase =(size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(__A ) )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__A , size=(size['height'], size['width']) , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> int:
return rescale(__A , scale=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =crop_pct if crop_pct is not None else self.crop_pct
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase =image_std if image_std is not None else self.image_std
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
_lowerCAmelCase =crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase =get_size_dict(__A , param_name='crop_size' )
_lowerCAmelCase =make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(__A ) for image in images]
if do_resize:
_lowerCAmelCase =[self.resize(image=__A , size=__A , crop_pct=__A , resample=__A ) for image in images]
if do_center_crop:
_lowerCAmelCase =[self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
_lowerCAmelCase =[self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
_lowerCAmelCase =[self.normalize(image=__A , mean=__A , std=__A ) for image in images]
_lowerCAmelCase =[to_channel_dimension_format(__A , __A ) for image in images]
_lowerCAmelCase ={'pixel_values': images}
return BatchFeature(data=__A , tensor_type=__A )
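# Hedged usage sketch (assumption: the image processor above follows the
# standard BaseImageProcessor API, so calling the instance routes to
# `preprocess`; the shape below follows from the defaults
# size={"shortest_edge": 224}, crop_pct=0.9 and a 224x224 crop_size):
#
#   import numpy as np
#   processor = SCREAMING_SNAKE_CASE()
#   dummy = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
#   batch = processor(dummy, return_tensors="np")
#   batch["pixel_values"].shape   # expected (1, 3, 224, 224)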
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# TODO: handle multi-GPU (args.gpus > 1) in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# guard against the model hanging on generate (which can indicate a bad saved config)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # BLEU must improve over training
assert 1.0 >= last_step_stats["val_avg_gen_time"]  # guard against hanging on generate (possible bad saved config)
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=3 , __A=224 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , ) -> Any:
_lowerCAmelCase =size if size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =min_resolution
_lowerCAmelCase =max_resolution
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean
_lowerCAmelCase =image_std
def UpperCamelCase__ ( self ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : Dict = ViTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =EfficientFormerImageProcessorTester(self )
@property
def UpperCamelCase__ ( self ) -> int:
return self.image_proc_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , 'image_mean' ) )
self.assertTrue(hasattr(__A , 'image_std' ) )
self.assertTrue(hasattr(__A , 'do_normalize' ) )
self.assertTrue(hasattr(__A , 'do_resize' ) )
self.assertTrue(hasattr(__A , 'size' ) )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
def UpperCamelCase__ ( self ) -> List[str]:
# Initialize image_processor
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase =prepare_image_inputs(self.image_proc_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_lowerCAmelCase =image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processor(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def UpperCamelCase__ ( self ) -> int:
# Initialize image_processor
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase =prepare_image_inputs(self.image_proc_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
_lowerCAmelCase =image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processor(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def UpperCamelCase__ ( self ) -> List[Any]:
# Initialize image_processor
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase =prepare_image_inputs(self.image_proc_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
_lowerCAmelCase =image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processor(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this; defaults to 0, meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
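# Hedged CLI sketch: the script filename and the generic flags are assumptions
# (generic args come from lightning_base); only --task, --max_seq_length,
# --gpus and --overwrite_cache are registered above:
#
#   python run_pl_glue.py --task mrpc --max_seq_length 128 --gpus 1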
| 58
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
_lowerCAmelCase =parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(a__ )
DownloadCommand.register_subcommand(a__ )
EnvironmentCommand.register_subcommand(a__ )
RunCommand.register_subcommand(a__ )
ServeCommand.register_subcommand(a__ )
UserCommands.register_subcommand(a__ )
AddNewModelCommand.register_subcommand(a__ )
AddNewModelLikeCommand.register_subcommand(a__ )
LfsCommands.register_subcommand(a__ )
PTtoTFCommand.register_subcommand(a__ )
# Let's go
_lowerCAmelCase =parser.parse_args()
if not hasattr(a__ , 'func' ):
parser.print_help()
exit(1 )
# Run
_lowerCAmelCase =args.func(a__ )
service.run()
if __name__ == "__main__":
main()
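# Hedged usage note: with the subcommands registered above, the console entry
# point dispatches for example as
#
#   transformers-cli env                          # EnvironmentCommand
#   transformers-cli download bert-base-uncased   # DownloadCommand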
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
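# Hedged usage sketch (assumption: the methods above were originally
# add_edge(u, v, w) and boruvka(); the obfuscation collapsed both to
# UpperCamelCase__, so the calls below are illustrative only):
#
#   g = SCREAMING_SNAKE_CASE(3)   # a graph with 3 nodes
#   g.add_edge(0, 1, 5)
#   g.add_edge(1, 2, 3)
#   g.add_edge(0, 2, 8)
#   g.boruvka()   # MST keeps edges (1, 2, 3) and (0, 1, 5); total weight 8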
| 58
| 0
|
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 1_000_000 ):
SCREAMING_SNAKE_CASE__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
SCREAMING_SNAKE_CASE__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
SCREAMING_SNAKE_CASE__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
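# Worked check of the count above: with limit=8 the only hollow square lamina
# is a 3x3 square with its 1x1 centre removed (8 tiles), so solution(8) == 1
# (`solution` is the name used in the __main__ guard; the def itself was
# renamed by the obfuscation).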
| 59
|
from math import pow, sqrt
def SCREAMING_SNAKE_CASE__ ( *UpperCamelCase__: float ):
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ ) > 0 and all(value > 0.0 for value in values )
return result
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ )
else ValueError("""Input Error: Molar mass values must be greater than 0.""" )
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else ValueError(
"""Input Error: Molar mass and effusion rate values must greater than 0.""" )
)
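# Hedged usage sketch of Graham's law above, rate_1 / rate_2 = sqrt(M_2 / M_1).
# The function name and argument order are assumptions (the defs were collapsed
# to SCREAMING_SNAKE_CASE__); the values follow the upstream doctest:
#
#   effusion_ratio(2.016, 4.002)   # -> 1.408943, i.e. round(sqrt(4.002 / 2.016), 6)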
| 59
| 1
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
for param in module.parameters():
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
SCREAMING_SNAKE_CASE__ = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = plt.imshow(UpperCamelCase__ )
fig.axes.get_xaxis().set_visible(UpperCamelCase__ )
fig.axes.get_yaxis().set_visible(UpperCamelCase__ )
plt.show()
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = datetime.now()
SCREAMING_SNAKE_CASE__ = current_time.strftime("""%H:%M:%S""" )
return timestamp
| 59
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase_ = "nat"
lowerCamelCase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :List[Any] , __A :Optional[Any]=4 , __A :Any=3 , __A :Optional[int]=64 , __A :Optional[int]=[3, 4, 6, 5] , __A :Union[str, Any]=[2, 4, 8, 16] , __A :Optional[Any]=7 , __A :Optional[Any]=3.0 , __A :List[Any]=True , __A :int=0.0 , __A :Dict=0.0 , __A :Optional[Any]=0.1 , __A :str="gelu" , __A :Optional[Any]=0.0_2 , __A :Optional[int]=1E-5 , __A :Optional[int]=0.0 , __A :Optional[Any]=None , __A :Union[str, Any]=None , **__A :Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = len(__A )
SCREAMING_SNAKE_CASE__ = num_heads
SCREAMING_SNAKE_CASE__ = kernel_size
SCREAMING_SNAKE_CASE__ = mlp_ratio
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = drop_path_rate
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE__ = int(embed_dim * 2 ** (len(__A ) - 1) )
SCREAMING_SNAKE_CASE__ = layer_scale_init_value
SCREAMING_SNAKE_CASE__ = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__A ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
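# Hedged check of the derived channel dimension above: with the defaults
# (embed_dim=64 and four depths), int(64 * 2 ** 3) == 512 is stored as
# `hidden_size` (attribute name per the comment in __init__), which is what
# lets Nat plug into VisionEncoderDecoderModel:
#
#   cfg = UpperCamelCase_()
#   cfg.hidden_size   # expected 512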
| 59
| 1
|
import numpy as np
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: np.array ):
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: np.array ):
return vector * sigmoid(1.7_0_2 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
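# Worked check (assumption: the original names were sigmoid and
# sigmoid_linear_unit; the second helper computes x * sigmoid(1.702 * x),
# the sigmoid approximation of GELU):
#
#   sigmoid(np.array([0.0]))   # -> array([0.5])
#   # and x * sigmoid(1.702 * x) is 0.0 at x = 0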
| 59
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(UpperCamelCase__ )
EnvironmentCommand.register_subcommand(UpperCamelCase__ )
TestCommand.register_subcommand(UpperCamelCase__ )
RunBeamCommand.register_subcommand(UpperCamelCase__ )
DummyDataCommand.register_subcommand(UpperCamelCase__ )
# Parse args
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = parser.parse_known_args()
if not hasattr(UpperCamelCase__ , """func""" ):
parser.print_help()
exit(1 )
SCREAMING_SNAKE_CASE__ = parse_unknown_args(UpperCamelCase__ )
# Run
SCREAMING_SNAKE_CASE__ = args.func(UpperCamelCase__ , **UpperCamelCase__ )
service.run()
if __name__ == "__main__":
main()
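# Hedged example of the unknown-arg parsing above (parse_unknown_args is the
# name used inside main; keys are the flag names with leading dashes stripped):
#
#   parse_unknown_args(["--name", "squad", "--save_infos", "1"])
#   # -> {"name": "squad", "save_infos": "1"}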
| 59
| 1
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
SCREAMING_SNAKE_CASE__ = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
f'''{test_file} instead.''' )
SCREAMING_SNAKE_CASE__ = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
SCREAMING_SNAKE_CASE__ = components[:-1] + [test_fn.replace(""".py""" , """""" )]
SCREAMING_SNAKE_CASE__ = """.""".join(UpperCamelCase__ )
return test_module_path
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict ):
SCREAMING_SNAKE_CASE__ = get_module_path(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = importlib.import_module(UpperCamelCase__ )
return test_module
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
if attr.endswith("""ModelTester""" ):
tester_classes.append(getattr(UpperCamelCase__ , UpperCamelCase__ ) )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = get_test_module(UpperCamelCase__ )
for attr in dir(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase__ , """all_model_classes""" , [] )
if len(UpperCamelCase__ ) > 0:
test_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = get_test_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = test_class()
if hasattr(UpperCamelCase__ , """setUp""" ):
test.setUp()
SCREAMING_SNAKE_CASE__ = None
if hasattr(UpperCamelCase__ , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
SCREAMING_SNAKE_CASE__ = test.model_tester.__class__
return model_tester
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = get_test_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ):
SCREAMING_SNAKE_CASE__ = get_test_classes_for_model(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = []
for test_class in test_classes:
SCREAMING_SNAKE_CASE__ = get_model_tester_from_test_class(UpperCamelCase__ )
if tester_class is not None:
tester_classes.append(UpperCamelCase__ )
# sort with class names
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x.__name__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = get_test_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = {test_class: get_model_tester_from_test_class(UpperCamelCase__ ) for test_class in test_classes}
return test_tester_mapping
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = get_model_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = {
model_class: get_test_classes_for_model(UpperCamelCase__ , UpperCamelCase__ ) for model_class in model_classes
}
return model_test_mapping
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ):
SCREAMING_SNAKE_CASE__ = get_model_classes(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = {
model_class: get_tester_classes_for_model(UpperCamelCase__ , UpperCamelCase__ ) for model_class in model_classes
}
return model_to_tester_mapping
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return o
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return o.__name__
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return [to_json(UpperCamelCase__ ) for x in o]
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return {to_json(UpperCamelCase__ ): to_json(UpperCamelCase__ ) for k, v in o.items()}
else:
return o
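# Hedged usage sketch (helper names as referenced internally above):
#
#   path = "tests/models/bert/test_modeling_bert.py"
#   get_module_path(path)   # -> "tests.models.bert.test_modeling_bert"
#   # get_test_classes(path) then lists the test classes with a non-empty
#   # all_model_classes, and to_json(...) makes any of the mappings built
#   # here JSON-serializable.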
| 59
|
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :List[Any] , *__A :Tuple , **__A :Dict ) -> None:
"""simple docstring"""
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 59
| 1
|
import warnings
from functools import wraps
from typing import Callable
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Callable ):
@wraps(UpperCamelCase__ )
def _inner_fn(*UpperCamelCase__: Dict , **UpperCamelCase__: Any ):
warnings.warn(
(f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UpperCamelCase__ , )
return fn(*UpperCamelCase__ , **UpperCamelCase__ )
return _inner_fn
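# Minimal sketch of the decorator above (assumption: its upstream name is
# `experimental`, as in huggingface_hub; the def was renamed by the
# obfuscation):
#
#   @experimental
#   def new_feature():
#       return 42
#   new_feature()   # warns "'new_feature' is experimental ...", then returns 42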
| 59
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@require_torch
def _snake_case ( self :Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
SCREAMING_SNAKE_CASE__ = load_dataset("""ashraq/esc50""" )
SCREAMING_SNAKE_CASE__ = dataset["""train"""]["""audio"""][-1]["""array"""]
SCREAMING_SNAKE_CASE__ = audio_classifier(__A , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(__A ) , [{"""score""": 0.5_0_1, """label""": """Sound of a dog"""}, {"""score""": 0.4_9_9, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def _snake_case ( self :Dict ) -> List[str]:
"""simple docstring"""
pass
@slow
@require_torch
def _snake_case ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ = load_dataset("""ashraq/esc50""" )
SCREAMING_SNAKE_CASE__ = dataset["""train"""]["""audio"""][-1]["""array"""]
SCREAMING_SNAKE_CASE__ = audio_classifier(__A , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(__A ) , [
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
] , )
SCREAMING_SNAKE_CASE__ = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(__A ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(__A ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
pass
| 59
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfilt-large': (
    'https://huggingface.co/Salesforce/blip-vqa-base-capfilt/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "blip_text_model"
def __init__( self :Union[str, Any] , __A :int=3_0524 , __A :List[Any]=768 , __A :Optional[int]=768 , __A :str=3072 , __A :Tuple=768 , __A :Any=12 , __A :Dict=8 , __A :Union[str, Any]=512 , __A :int="gelu" , __A :int=1E-12 , __A :Tuple=0.0 , __A :Optional[int]=0.0 , __A :Optional[int]=0.0_2 , __A :Union[str, Any]=3_0522 , __A :List[Any]=2 , __A :str=0 , __A :int=102 , __A :Optional[int]=True , __A :List[Any]=True , **__A :Optional[int] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , sep_token_id=__A , **__A , )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = encoder_hidden_size
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = projection_dim
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = is_decoder
SCREAMING_SNAKE_CASE__ = use_cache
@classmethod
def _snake_case ( cls :Union[str, Any] , __A :Union[str, os.PathLike] , **__A :Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(__A , **__A )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
SCREAMING_SNAKE_CASE__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "blip_vision_model"
def __init__( self :str , __A :List[str]=768 , __A :Any=3072 , __A :str=512 , __A :int=12 , __A :List[str]=12 , __A :Any=384 , __A :Optional[Any]=16 , __A :Union[str, Any]="gelu" , __A :List[str]=1E-5 , __A :Any=0.0 , __A :Any=1E-10 , **__A :Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = projection_dim
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = hidden_act
@classmethod
def _snake_case ( cls :Optional[int] , __A :Union[str, os.PathLike] , **__A :Union[str, Any] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
SCREAMING_SNAKE_CASE__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "blip"
lowerCamelCase_ = True
def __init__( self :Tuple , __A :List[str]=None , __A :Any=None , __A :Tuple=512 , __A :List[Any]=2.6_5_9_2 , __A :Any=256 , **__A :List[str] , ) -> List[str]:
"""simple docstring"""
super().__init__(**__A )
if text_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
SCREAMING_SNAKE_CASE__ = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
SCREAMING_SNAKE_CASE__ = BlipTextConfig(**__A )
SCREAMING_SNAKE_CASE__ = BlipVisionConfig(**__A )
SCREAMING_SNAKE_CASE__ = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE__ = projection_dim
SCREAMING_SNAKE_CASE__ = logit_scale_init_value
SCREAMING_SNAKE_CASE__ = 1.0
SCREAMING_SNAKE_CASE__ = 0.0_2
SCREAMING_SNAKE_CASE__ = image_text_hidden_size
@classmethod
def _snake_case ( cls :int , __A :BlipTextConfig , __A :BlipVisionConfig , **__A :Any ) -> List[Any]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output
| 59
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowerCamelCase = data_utils.TransfoXLTokenizer
_lowerCamelCase = data_utils.TransfoXLCorpus
_lowerCamelCase = data_utils
_lowerCamelCase = data_utils
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Tuple ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCamelCase__ , """rb""" ) as fp:
SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ , encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' )
SCREAMING_SNAKE_CASE__ = corpus.vocab.__dict__
torch.save(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(f'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
SCREAMING_SNAKE_CASE__ = os.path.abspath(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = os.path.abspath(UpperCamelCase__ )
print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
SCREAMING_SNAKE_CASE__ = TransfoXLConfig()
else:
SCREAMING_SNAKE_CASE__ = TransfoXLConfig.from_json_file(UpperCamelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE__ = TransfoXLLMHeadModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = load_tf_weights_in_transfo_xl(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(f'''Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
print(f'''Save configuration file to {os.path.abspath(UpperCamelCase__ )}''' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained Transformer-XL model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
_lowerCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
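# Example invocation (hypothetical paths, for illustration only):
#   python convert_transfo_xl_checkpoint.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt-0 \
#       --transfo_xl_config_file ./transfo_xl_config.json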
| 59
| 1
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_lowerCamelCase = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
if isinstance(UpperCamelCase__ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = [image]
SCREAMING_SNAKE_CASE__ = [trans(img.convert("""RGB""" ) ) for img in image]
SCREAMING_SNAKE_CASE__ = torch.stack(UpperCamelCase__ )
return image
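# Note: Normalize([0.5], [0.5]) maps pixel values from [0, 1] to [-1, 1], the
# input range diffusion UNets are commonly trained on; the helper above accepts
# either a ready-made tensor or (a list of) PIL images.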
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Dict , __A :int , __A :List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__A , scheduler=__A )
def _snake_case ( self :str , __A :Optional[Any] ) -> Any:
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def _snake_case ( self :Union[str, Any] , __A :List[Any] , __A :Any , __A :Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = min(int(num_inference_steps * strength ) , __A )
SCREAMING_SNAKE_CASE__ = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self :List[str] , __A :Optional[int] , __A :Any , __A :List[Any] , __A :Any , __A :Dict , __A :Union[str, Any]=None ) -> Any:
"""simple docstring"""
if not isinstance(__A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__A )}''' )
SCREAMING_SNAKE_CASE__ = image.to(device=__A , dtype=__A )
if isinstance(__A , __A ) and len(__A ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__A )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
SCREAMING_SNAKE_CASE__ = init_latents.shape
SCREAMING_SNAKE_CASE__ = randn_tensor(__A , generator=__A , device=__A , dtype=__A )
# get latents
print("""add noise to latents at timestep""" , __A )
SCREAMING_SNAKE_CASE__ = self.scheduler.add_noise(__A , __A , __A )
SCREAMING_SNAKE_CASE__ = init_latents
return latents
@torch.no_grad()
def __call__( self :Any , __A :Union[torch.FloatTensor, PIL.Image.Image] = None , __A :float = 0.8 , __A :int = 1 , __A :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __A :float = 0.0 , __A :int = 50 , __A :Optional[bool] = None , __A :Optional[str] = "pil" , __A :bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(__A )
# 2. Preprocess image
SCREAMING_SNAKE_CASE__ = preprocess(__A )
# 3. set timesteps
self.scheduler.set_timesteps(__A , device=self.device )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_timesteps(__A , __A , self.device )
SCREAMING_SNAKE_CASE__ = timesteps[:1].repeat(__A )
# 4. Prepare latent variables
SCREAMING_SNAKE_CASE__ = self.prepare_latents(__A , __A , __A , self.unet.dtype , self.device , __A )
SCREAMING_SNAKE_CASE__ = latents
# 5. Denoising loop
for t in self.progress_bar(__A ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE__ = self.unet(__A , __A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(
__A , __A , __A , eta=__A , use_clipped_model_output=__A , generator=__A , ).prev_sample
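# map the denoised sample from [-1, 1] back to [0, 1] before converting to
# numpy / PIL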
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__A )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__A )
| 59
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str ):
def get_masked_lm_array(UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_array(UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_layer_array(UpperCamelCase__: int , UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_attention_layer_array(UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = array.reshape(UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
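# All four helpers above read one variable from the TF "token dropping"
# checkpoint and transpose dense kernels, since TF stores them as
# (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features).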
print(f'''Loading model based on config from {config_path}...''' )
SCREAMING_SNAKE_CASE__ = BertConfig.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = BertForMaskedLM(UpperCamelCase__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ = model.bert.encoder.layer[layer_index]
# Self-attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_query_dense/bias""" , self_attn.query.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_key_dense/bias""" , self_attn.key.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
SCREAMING_SNAKE_CASE__ = layer.attention.output
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_output_dense/bias""" , self_output.dense.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_attention_layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_attention_layer_norm/beta""" )
# Intermediate
SCREAMING_SNAKE_CASE__ = layer.intermediate
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_intermediate_dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_intermediate_dense/bias""" )
# Output
SCREAMING_SNAKE_CASE__ = layer.output
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_dense/bias""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_layer_norm/beta""" )
# Embeddings
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_position_embedding_layer/embeddings""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_type_embedding_layer/embeddings""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_embedding_norm_layer/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
SCREAMING_SNAKE_CASE__ = model.cls.predictions.transform
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""dense/bias""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""layer_norm/beta""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""embedding_table""" )
# Pooling
SCREAMING_SNAKE_CASE__ = BertPooler(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_pooler_layer/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(UpperCamelCase__ )
# Integration test - should load without any errors ;)
SCREAMING_SNAKE_CASE__ = BertForMaskedLM.from_pretrained(UpperCamelCase__ )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowerCamelCase = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 59
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = KandinskyInpaintPipeline
lowerCamelCase_ = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
lowerCamelCase_ = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
lowerCamelCase_ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCamelCase_ = False
@property
def _snake_case ( self :Dict ) -> List[str]:
"""simple docstring"""
return 32
@property
def _snake_case ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def _snake_case ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _snake_case ( self :Optional[int] ) -> int:
"""simple docstring"""
return 100
@property
def _snake_case ( self :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def _snake_case ( self :List[str] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE__ = MultilingualCLIP(__A )
SCREAMING_SNAKE_CASE__ = text_encoder.eval()
return text_encoder
@property
def _snake_case ( self :List[str] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""in_channels""": 9,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**__A )
return model
@property
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs )
return model
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_unet
SCREAMING_SNAKE_CASE__ = self.dummy_movq
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__A , set_alpha_to_one=__A , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__A , )
SCREAMING_SNAKE_CASE__ = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _snake_case ( self :Dict , __A :Optional[int] , __A :Union[str, Any]=0 ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__A ) ).to(__A )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__A )
# create init_image
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
SCREAMING_SNAKE_CASE__ = np.ones((64, 64) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = 0
if str(__A ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__A )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(__A )
SCREAMING_SNAKE_CASE__ = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def _snake_case ( self :int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__A ) )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _snake_case ( self :Dict ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
SCREAMING_SNAKE_CASE__ = np.ones((768, 768) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = """a hat"""
SCREAMING_SNAKE_CASE__ = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__A )
SCREAMING_SNAKE_CASE__ = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = pipe_prior(
__A , generator=__A , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
SCREAMING_SNAKE_CASE__ = pipeline(
__A , image=__A , mask_image=__A , image_embeds=__A , negative_image_embeds=__A , generator=__A , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__A , __A )
| 59
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCamelCase = '\\n Text data.\n Second line of data.'
_lowerCamelCase = 'file'
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
SCREAMING_SNAKE_CASE__ = bytes(UpperCamelCase__ , """utf-8""" )
with zstd.open(UpperCamelCase__ , """wb""" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
with open(os.path.join(tmpfs.local_root_dir , UpperCamelCase__ ) , """w""" ) as f:
f.write(UpperCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Dict , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
SCREAMING_SNAKE_CASE__ = input_paths[compression_format]
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = DownloadConfig(cache_dir=UpperCamelCase__ , extract_compressed_file=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = """custom_cache"""
SCREAMING_SNAKE_CASE__ = """custom_extracted_dir"""
SCREAMING_SNAKE_CASE__ = tmp_path / """custom_extracted_path"""
if default_extracted:
SCREAMING_SNAKE_CASE__ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , UpperCamelCase__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE__ = xz_file
SCREAMING_SNAKE_CASE__ = (
DownloadConfig(extract_compressed_file=UpperCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCamelCase__ )
)
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
assert Path(UpperCamelCase__ ).parent.parts[-2:] == expected
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ):
# absolute path
SCREAMING_SNAKE_CASE__ = str(Path(UpperCamelCase__ ).resolve() )
assert cached_path(UpperCamelCase__ ) == text_file
# relative path
SCREAMING_SNAKE_CASE__ = str(Path(UpperCamelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(UpperCamelCase__ ) == text_file
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
# absolute path
SCREAMING_SNAKE_CASE__ = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
# relative path
SCREAMING_SNAKE_CASE__ = """./__missing_file__.txt"""
with pytest.raises(UpperCamelCase__ ):
cached_path(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( ):
with pytest.raises(UpperCamelCase__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
http_get("""https://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
fsspec_head("""s3://huggingface.co""" )
| 59
| 1
|
import inspect
import unittest
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
SCREAMING_SNAKE_CASE__ = inspect.getmembers(__A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE__ = """k-diffusion"""
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE__ = """invisible-watermark"""
assert backend in deps, f'''{backend} is not in the deps table!'''
| 59
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCamelCase = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=UpperCamelCase__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=UpperCamelCase__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=UpperCamelCase__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=UpperCamelCase__ , default=1_000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=UpperCamelCase__ , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=UpperCamelCase__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=UpperCamelCase__ , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
def fn(UpperCamelCase__: Any ):
return tokenizer(examples["""text"""] )
return fn
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
SCREAMING_SNAKE_CASE__ = tf.train.Features(feature=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = tf.train.Example(features=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = example.SerializeToString()
records.append(UpperCamelCase__ )
return records
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
SCREAMING_SNAKE_CASE__ = min(len(UpperCamelCase__ ) , args.limit )
SCREAMING_SNAKE_CASE__ = dataset.select(range(UpperCamelCase__ ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
SCREAMING_SNAKE_CASE__ = tokenize_function(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = dataset.map(UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(UpperCamelCase__: int ):
# Concatenate all texts.
SCREAMING_SNAKE_CASE__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
SCREAMING_SNAKE_CASE__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
SCREAMING_SNAKE_CASE__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
SCREAMING_SNAKE_CASE__ = {
k: [t[i : i + args.max_length] for i in range(0 , UpperCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
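# Illustration: with max_length 512, two tokenized documents of 300 and 400
# tokens are concatenated to 700 tokens and re-split into a single 512-token
# block; the 188-token remainder is dropped.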
SCREAMING_SNAKE_CASE__ = dataset_tokenized.map(UpperCamelCase__ , batched=UpperCamelCase__ , batch_size=1_000 , num_proc=4 )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
for shard in range(0 , len(UpperCamelCase__ ) , args.shard_size ):
SCREAMING_SNAKE_CASE__ = grouped_dataset[shard : shard + args.shard_size]
SCREAMING_SNAKE_CASE__ = len(dataset_snapshot["""input_ids"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
SCREAMING_SNAKE_CASE__ = get_serialized_examples(UpperCamelCase__ )
with tf.io.TFRecordWriter(UpperCamelCase__ ) as out_file:
for i in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE__ = serialized_examples[i]
out_file.write(UpperCamelCase__ )
print("""Wrote file {} containing {} records""".format(UpperCamelCase__ , UpperCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , """w""" ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase = parse_args()
main(args)
| 59
| 1
|
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list , UpperCamelCase__: list ):
_validate_point(UpperCamelCase__ )
_validate_point(UpperCamelCase__ )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ) ) )
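# Worked example (assuming the function above is the Manhattan/taxicab
# distance):
#   [1, 1] and [2, 2] -> |1 - 2| + |1 - 2| = 2.0
#   [0, 0, 0] and [1, 2, 3] -> 1 + 2 + 3 = 6.0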
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[float] ):
if point:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for item in point:
if not isinstance(UpperCamelCase__ , (int, float) ):
SCREAMING_SNAKE_CASE__ = (
"""Expected a list of numbers as input, found """
f'''{type(UpperCamelCase__ ).__name__}'''
)
raise TypeError(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ = f'''Expected a list of numbers as input, found {type(UpperCamelCase__ ).__name__}'''
raise TypeError(UpperCamelCase__ )
else:
raise ValueError("""Missing an input""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list , UpperCamelCase__: list ):
_validate_point(UpperCamelCase__ )
_validate_point(UpperCamelCase__ )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCamelCase__ , UpperCamelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59
|
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[list[float]] ):
SCREAMING_SNAKE_CASE__ = []
for data in source_data:
for i, el in enumerate(UpperCamelCase__ ):
if len(UpperCamelCase__ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(UpperCamelCase__ ) )
return data_lists
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[list[float]] , UpperCamelCase__: list[int] ):
SCREAMING_SNAKE_CASE__ = []
for dlist, weight in zip(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = min(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = max(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# any weight other than 0 or 1 is invalid
else:
SCREAMING_SNAKE_CASE__ = f'''Invalid weight of {weight:f} provided'''
raise ValueError(UpperCamelCase__ )
score_lists.append(UpperCamelCase__ )
return score_lists
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[list[float]] ):
SCREAMING_SNAKE_CASE__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = final_scores[j] + ele
return final_scores
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list[list[float]] , UpperCamelCase__: list[int] ):
SCREAMING_SNAKE_CASE__ = get_data(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = calculate_each_score(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = generate_final_scores(UpperCamelCase__ )
# append scores to source data
for i, ele in enumerate(UpperCamelCase__ ):
source_data[i].append(UpperCamelCase__ )
return source_data
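# Sketch of the overall flow (hypothetical numbers): for source data
# [[20, 60], [25, 90], [30, 75]] with weights [0, 1], column 0 is min-max
# normalized and inverted (weight 0 treats lower as better), column 1 is
# min-max normalized as-is (weight 1 treats higher as better), and the per-row
# sums 1.0, 1.5 and 0.5 are appended to the rows.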
| 59
| 1
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Dict , **__A :List[Any] ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ["""bs4"""] )
super().__init__(**__A )
def _snake_case ( self :List[str] , __A :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
SCREAMING_SNAKE_CASE__ = parent.find_all(child.name , recursive=__A )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__A ) else next(i for i, s in enumerate(__A , 1 ) if s is child ) )
SCREAMING_SNAKE_CASE__ = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
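# The two reversed lists describe the path from the document root down to the
# node: subscript 0 marks an only child, otherwise the 1-based position among
# same-named siblings; construct_xpath below joins them into a string such as
# /html/body/div/li[2].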
def _snake_case ( self :Optional[int] , __A :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = BeautifulSoup(__A , """html.parser""" )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for element in html_code.descendants:
if type(__A ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
SCREAMING_SNAKE_CASE__ = html.unescape(__A ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.xpath_soup(__A )
stringaxtag_seq.append(__A )
stringaxsubs_seq.append(__A )
if len(__A ) != len(__A ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(__A ) != len(__A ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _snake_case ( self :Optional[Any] , __A :str , __A :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """"""
for tagname, subs in zip(__A , __A ):
xpath += f'''/{tagname}'''
if subs != 0:
xpath += f'''[{subs}]'''
return xpath
def __call__( self :Dict , __A :List[Any] ) -> BatchFeature:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = False
# Check that strings has a valid type
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = True
elif isinstance(__A , (list, tuple) ):
if len(__A ) == 0 or isinstance(html_strings[0] , __A ):
SCREAMING_SNAKE_CASE__ = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
f'''but is of type {type(__A )}.''' )
SCREAMING_SNAKE_CASE__ = bool(isinstance(__A , (list, tuple) ) and (isinstance(html_strings[0] , __A )) )
if not is_batched:
SCREAMING_SNAKE_CASE__ = [html_strings]
# Get nodes + xpaths
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for html_string in html_strings:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_three_from_single(__A )
nodes.append(__A )
SCREAMING_SNAKE_CASE__ = []
for node, tag_list, sub_list in zip(__A , __A , __A ):
SCREAMING_SNAKE_CASE__ = self.construct_xpath(__A , __A )
xpath_strings.append(__A )
xpaths.append(__A )
# return as Dict
SCREAMING_SNAKE_CASE__ = {"""nodes""": nodes, """xpaths""": xpaths}
SCREAMING_SNAKE_CASE__ = BatchFeature(data=__A , tensor_type=__A )
return encoded_inputs
| 59
|
import warnings
from functools import wraps
from typing import Callable
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Callable ):
@wraps(UpperCamelCase__ )
def _inner_fn(*UpperCamelCase__: Dict , **UpperCamelCase__: Any ):
warnings.warn(
(f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UpperCamelCase__ , )
return fn(*UpperCamelCase__ , **UpperCamelCase__ )
return _inner_fn
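# Usage sketch (hypothetical name for the decorator defined above):
#   @experimental
#   def new_api():
#       ...
# Each call to new_api() first emits the warning via warnings.warn and then
# delegates to the wrapped function unchanged.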
| 59
| 1
|
import qiskit
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE__ = qiskit.QuantumCircuit(UpperCamelCase__ , UpperCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
SCREAMING_SNAKE_CASE__ = qiskit.execute(UpperCamelCase__ , UpperCamelCase__ , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(UpperCamelCase__ )
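# With no gates applied before the measurement, the qubit remains in |0>, so
# the histogram is expected to be approximately {'0': 1000} for 1_000 shots.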
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 59
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = RoCBertTokenizer
lowerCamelCase_ = None
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = filter_non_english
def _snake_case ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
for i, value in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
json.dump(__A , __A , ensure_ascii=__A )
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(__A , __A , ensure_ascii=__A )
def _snake_case ( self :List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(__A , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__A ) , [5, 6, 2, 5, 7, 8] )
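# Token, shape and pronunciation ids coincide here only because setUp built all
# three vocabularies from the same enumeration of the toy token list.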
def _snake_case ( self :List[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def _snake_case ( self :List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def _snake_case ( self :Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self :int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self :List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = RoCBertBasicTokenizer(do_lower_case=__A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def _snake_case ( self :Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
SCREAMING_SNAKE_CASE__ = {}
for i, token in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = i
SCREAMING_SNAKE_CASE__ = RoCBertWordpieceTokenizer(vocab=__A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def _snake_case ( self :Any ) -> str:
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def _snake_case ( self :int ) -> str:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def _snake_case ( self :List[str] ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def _snake_case ( self :int ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE__ = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
SCREAMING_SNAKE_CASE__ = tokenizer_r.encode_plus(
__A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
SCREAMING_SNAKE_CASE__ = tokenizer_r.do_lower_case if hasattr(__A , """do_lower_case""" ) else False
SCREAMING_SNAKE_CASE__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def _snake_case ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""的""", """人""", """有"""]
SCREAMING_SNAKE_CASE__ = """""".join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE__ = tokenizer_p.encode(__A , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer_r.encode(__A , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer_r.convert_ids_to_tokens(__A )
SCREAMING_SNAKE_CASE__ = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE__ = self.tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE__ = tokenizer_r.encode(__A , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer_p.encode(__A , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer_r.convert_ids_to_tokens(__A )
SCREAMING_SNAKE_CASE__ = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE__ = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("""你好""" , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.encode("""你是谁""" , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__A )
SCREAMING_SNAKE_CASE__ = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _snake_case ( self :List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ = """你好,你是谁"""
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__A )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(__A )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_shape_ids(__A )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_pronunciation_ids(__A )
SCREAMING_SNAKE_CASE__ = tokenizer.prepare_for_model(
__A , __A , __A , add_special_tokens=__A )
SCREAMING_SNAKE_CASE__ = tokenizer.encode_plus(__A , add_special_tokens=__A )
self.assertEqual(__A , __A )
| 59
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
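# _LazyModule defers the heavy torch-dependent imports until a module attribute
# is first accessed, so importing the package stays cheap when only the config
# is needed.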
| 59
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = original_name.split(""".""" )[0]
SCREAMING_SNAKE_CASE__ = key.split(""".""" )
SCREAMING_SNAKE_CASE__ = int(key_list[key_list.index(UpperCamelCase__ ) - 2] )
SCREAMING_SNAKE_CASE__ = int(key_list[key_list.index(UpperCamelCase__ ) - 1] )
SCREAMING_SNAKE_CASE__ = orig_block_num - offset
SCREAMING_SNAKE_CASE__ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' , f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
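# Editorial reading of the helper above: the original PoolFormer checkpoint
# numbers blocks globally under `network.`, interleaved with patch-embedding
# layers, so subtracting the running patch-embedding offset re-bases the index
# to the per-stage `block.` numbering used by the HF model.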
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = OrderedDict()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 0, 0
for key, value in state_dict.items():
if key.startswith("""network""" ):
SCREAMING_SNAKE_CASE__ = key.replace("""network""" , """poolformer.encoder""" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""" ) and "patch_embed" not in key:
patch_emb_offset += 1
SCREAMING_SNAKE_CASE__ = key[: key.find("""proj""" )]
SCREAMING_SNAKE_CASE__ = key.replace(UpperCamelCase__ , f'''patch_embeddings.{total_embed_found}.''' )
SCREAMING_SNAKE_CASE__ = key.replace("""proj""" , """projection""" )
if key.endswith("""bias""" ):
total_embed_found += 1
if "patch_embeddings" in key:
SCREAMING_SNAKE_CASE__ = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """mlp.fc1""" , """output.conv1""" )
if "mlp.fc2" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """mlp.fc2""" , """output.conv2""" )
if "norm1" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """norm1""" , """before_norm""" )
if "norm2" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """norm2""" , """after_norm""" )
if "layer_scale_1" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """layer_scale_1""" , """layer_scale_1""" )
if "layer_scale_2" in key:
SCREAMING_SNAKE_CASE__ = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """layer_scale_2""" , """layer_scale_2""" )
if "head" in key:
SCREAMING_SNAKE_CASE__ = key.replace("""head""" , """classifier""" )
SCREAMING_SNAKE_CASE__ = value
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = PoolFormerConfig()
# set attributes based on model_name
SCREAMING_SNAKE_CASE__ = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ = model_name[-3:]
SCREAMING_SNAKE_CASE__ = 1_000
SCREAMING_SNAKE_CASE__ = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE__ = (1, 1_000)
# set config attributes
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
if size == "s12":
SCREAMING_SNAKE_CASE__ = [2, 2, 6, 2]
SCREAMING_SNAKE_CASE__ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "s24":
SCREAMING_SNAKE_CASE__ = [4, 4, 12, 4]
SCREAMING_SNAKE_CASE__ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "s36":
SCREAMING_SNAKE_CASE__ = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE__ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "m36":
SCREAMING_SNAKE_CASE__ = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE__ = [96, 192, 384, 768]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9_5
elif size == "m48":
SCREAMING_SNAKE_CASE__ = [8, 8, 24, 8]
SCREAMING_SNAKE_CASE__ = [96, 192, 384, 768]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9_5
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
SCREAMING_SNAKE_CASE__ = PoolFormerImageProcessor(crop_pct=UpperCamelCase__ )
# Prepare image
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
SCREAMING_SNAKE_CASE__ = torch.load(UpperCamelCase__ , map_location=torch.device("""cpu""" ) )
# rename keys
SCREAMING_SNAKE_CASE__ = rename_keys(UpperCamelCase__ )
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE__ = PoolFormerForImageClassification(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# Define image processor
SCREAMING_SNAKE_CASE__ = PoolFormerImageProcessor(crop_pct=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values
# forward pass
SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
SCREAMING_SNAKE_CASE__ = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
SCREAMING_SNAKE_CASE__ = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
SCREAMING_SNAKE_CASE__ = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_lowerCamelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
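# Example invocation (editor's sketch; the checkpoint path is a placeholder that
# must point at a real PoolFormer .pth checkpoint):
#
#     python convert_poolformer_original_to_pytorch.py \
#         --model_name poolformer_s12 \
#         --checkpoint_path /path/to/poolformer_s12.pth.tar \
#         --pytorch_dump_folder_path ./poolformer-s12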
| 59
| 1
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment Tree constructor; works with any commutative combiner function."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Update an element in O(log N) time."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Get range query value in O(log N) time."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every possible segment against a plain reduce over the array."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
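# Editor's note: build() is O(N) while update() and query() are O(log N). A tiny
# standalone check (hypothetical values, separate from the harness above):
#
#     assert SegmentTree([1, 2, 3], min).query(0, 2) == 1
#     assert SegmentTree([3, 1], max).query(0, 1) == 3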
| 59
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
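# Editor's sketch of composing the configs above (hypothetical values, shown for
# illustration only):
#
#     text_cfg = Pix2StructTextConfig(num_layers=2)
#     vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
#     config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert config.text_config.num_layers == 2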
| 59
| 1
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
def __init__( self :str , __A :List[str] , __A :Union[str, Any]=99 , __A :Optional[int]=13 , __A :List[Any]=7 , __A :List[Any]=9 , __A :List[str]=True , __A :List[Any]=True , __A :List[Any]=False , __A :Tuple=32 , __A :int=5 , __A :Optional[int]=4 , __A :Dict=37 , __A :Union[str, Any]=8 , __A :Dict=0.1 , __A :Dict=0.0_0_2 , __A :Dict=1 , __A :Union[str, Any]=0 , __A :int=0 , __A :List[str]=None , __A :Optional[Any]=None , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = encoder_seq_length
SCREAMING_SNAKE_CASE__ = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_attention_mask
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = d_ff
SCREAMING_SNAKE_CASE__ = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ = dropout_rate
SCREAMING_SNAKE_CASE__ = initializer_factor
SCREAMING_SNAKE_CASE__ = eos_token_id
SCREAMING_SNAKE_CASE__ = pad_token_id
SCREAMING_SNAKE_CASE__ = decoder_start_token_id
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = decoder_layers
def _snake_case ( self :Any ) -> str:
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self :Tuple , __A :int , __A :str , __A :List[str] , __A :Optional[Any]=None , __A :str=None , __A :int=None , __A :Optional[int]=None , __A :Dict=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE__ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE__ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__A )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__A )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__A )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE__ = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ = self.get_config()
SCREAMING_SNAKE_CASE__ = config.num_attention_heads
SCREAMING_SNAKE_CASE__ = self.prepare_inputs_dict(__A , __A , __A )
return config, input_dict
def _snake_case ( self :str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self :Any ) -> Optional[Any]:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self :Dict ) -> Optional[Any]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Any , __A :Optional[Any] , __A :int , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = UMTaModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE__ = model(
input_ids=__A , decoder_input_ids=__A , attention_mask=__A , decoder_attention_mask=__A , )
SCREAMING_SNAKE_CASE__ = model(input_ids=__A , decoder_input_ids=__A )
SCREAMING_SNAKE_CASE__ = result.last_hidden_state
SCREAMING_SNAKE_CASE__ = result.past_key_values
SCREAMING_SNAKE_CASE__ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__A ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self :Optional[int] , __A :List[Any] , __A :str , __A :str , __A :Dict , __A :Optional[int] , __A :Any , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
# first forward pass
SCREAMING_SNAKE_CASE__ = model(__A , use_cache=__A )
SCREAMING_SNAKE_CASE__ = model(__A )
SCREAMING_SNAKE_CASE__ = model(__A , use_cache=__A )
self.parent.assertTrue(len(__A ) == len(__A ) )
self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ = model(__A )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE__ = model(__A , past_key_values=__A )["""last_hidden_state"""]
# select random slice
SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-3 ) )
def _snake_case ( self :str , __A :Union[str, Any] , __A :Optional[int] , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = UMTaModel(config=__A ).to(__A ).half().eval()
SCREAMING_SNAKE_CASE__ = model(**__A )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)
    def test_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self :List[Any] ) -> int:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 59
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."

    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
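# Editor's note (illustrative, not part of the original test): with 2 processes,
# rank 0 holds a (2, 10) tensor and rank 1 a (3, 10) tensor, so
# pad_across_processes() zero-pads both to (3, 10) -- padding rows go at the end
# by default, or at the front when pad_first=True, which is exactly what the
# checks above assert.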
| 59
| 1
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job, distributing the remainder to the first jobs."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most `max_num_jobs` gen_kwargs, sharding only the list values."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs; lists of the same size get the same shuffling."""
    # First, generate the shuffled indices per list size
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
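# Editor's sketch of the distribution logic above (values computed by hand):
#
#     _distribute_shards(num_shards=5, max_num_jobs=2)
#     # -> [range(0, 3), range(3, 5)]  (earlier jobs absorb the remainder first)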
| 59
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
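# Editor's note (computed from the defaults above, shown for illustration): the
# default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiplies out to
# 5 * 2**5 = 320, so
#
#     SEWDConfig().inputs_to_logits_ratio  # -> 320 input samples per logit frame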
| 59
|
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 59
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
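# Editor's usage sketch (not part of the original file; the checkpoint id is an
# assumption, borrowed from the usual RePaint example):
#
#     from diffusers import RePaintPipeline
#
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     out = pipe(image=original, mask_image=mask, num_inference_steps=250,
#                jump_length=10, jump_n_sample=10)
#     inpainted = out.images[0]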
| 59
| 1
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Convert a BertModel's state dict into a TF 1.x checkpoint under `ckpt_dir`."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
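# Example invocation (editor's sketch; paths are placeholders):
#
#     python convert_bert_pytorch_checkpoint_to_original_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./bert/pytorch_model.bin \
#         --tf_cache_dir ./bert-tf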
| 59
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = OpenAIGPTTokenizer
lowerCamelCase_ = OpenAIGPTTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
def _snake_case ( self :Optional[Any] ) -> Dict:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE__ = dict(zip(__A , range(len(__A ) ) ) )
SCREAMING_SNAKE_CASE__ = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__A ) )
def _snake_case ( self :Union[str, Any] , __A :str ) -> List[Any]:
"""simple docstring"""
return "lower newer", "lower newer"
def _snake_case ( self :Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
SCREAMING_SNAKE_CASE__ = """lower"""
SCREAMING_SNAKE_CASE__ = ["""low""", """er</w>"""]
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = tokens + ["""<unk>"""]
SCREAMING_SNAKE_CASE__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def _snake_case ( self :Optional[Any] , __A :Optional[Any]=15 ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
# Simple input
SCREAMING_SNAKE_CASE__ = """This is a simple input"""
SCREAMING_SNAKE_CASE__ = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE__ = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="""max_length""" )
# Simple input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="""max_length""" )
# Simple input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="""max_length""" , )
# Pair input
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="""max_length""" )
# Pair input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="""max_length""" )
# Pair input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="""max_length""" , )
def _snake_case ( self :Dict ) -> List[Any]:
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase__ ):
pass
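# A self-contained sketch (separate from the tests above) of how the toy
# vocab/merges fixture drives byte-pair encoding: merge rules are applied in
# priority order, so "lower" segments into ["low", "er</w>"] exactly as the
# test asserts. This is a simplified toy, not the tokenizer's real algorithm.
def toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # mark the end of the word
    for left, right in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == left and symbols[i + 1] == right:
                symbols[i : i + 2] = [left + right]  # fuse the matched pair
            else:
                i += 1
    return symbols

assert toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]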
| 59
| 1
|
import random
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: str , UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = a[left_index]
SCREAMING_SNAKE_CASE__ = left_index + 1
for j in range(left_index + 1 , UpperCamelCase__ ):
if a[j] < pivot:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = a[i], a[j]
i += 1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = a[i - 1], a[left_index]
return i - 1
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] , UpperCamelCase__: List[Any] , UpperCamelCase__: str ):
if left < right:
SCREAMING_SNAKE_CASE__ = random.randint(UpperCamelCase__ , right - 1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
SCREAMING_SNAKE_CASE__ = partition(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
quick_sort_random(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
UpperCamelCase__ , pivot_index + 1 , UpperCamelCase__ ) # recursive quicksort to the right of the pivot point
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by a comma:\n""" ).strip()
SCREAMING_SNAKE_CASE__ = [int(UpperCamelCase__ ) for item in user_input.split(""",""" )]
quick_sort_random(UpperCamelCase__ , 0 , len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
if __name__ == "__main__":
main()
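# For reference, a readable sketch of the same randomized quicksort with the
# mechanically renamed identifiers restored (names chosen here for clarity):
import random

def quick_sort(a, left, right):
    if left >= right:  # empty or single-element slice [left, right)
        return
    pivot_index = random.randint(left, right - 1)
    a[left], a[pivot_index] = a[pivot_index], a[left]  # move pivot to the front
    pivot = a[left]
    i = left + 1
    for j in range(left + 1, right):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left], a[i - 1] = a[i - 1], a[left]  # place the pivot between the halves
    quick_sort(a, left, i - 1)
    quick_sort(a, i, right)

nums = [3, 1, 4, 1, 5, 9, 2, 6]
quick_sort(nums, 0, len(nums))
assert nums == [1, 1, 2, 3, 4, 5, 6, 9]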
| 59
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCamelCase_ = Features({"image": Image()} )
lowerCamelCase_ = Features({"labels": ClassLabel} )
lowerCamelCase_ = "image"
lowerCamelCase_ = "labels"
def _snake_case ( self :List[str] , __A :Tuple ) -> Tuple:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self )
SCREAMING_SNAKE_CASE__ = self.label_schema.copy()
SCREAMING_SNAKE_CASE__ = features[self.label_column]
SCREAMING_SNAKE_CASE__ = label_schema
return task_template
@property
def _snake_case ( self :Dict ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
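# For context, the renamed class above is datasets' ImageClassification task
# template (shipped with older `datasets` releases); aligning it with a
# dataset's features copies the real label names into the label schema. The
# feature names below are illustrative.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['cat', 'dog']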
| 59
| 1
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
def __init__( self :List[str] , __A :List[str] , __A :Union[str, Any]=7 , __A :Union[str, Any]=3 , __A :List[Any]=18 , __A :Optional[int]=30 , __A :Tuple=400 , __A :Optional[Any]=True , __A :List[Any]=None , __A :Tuple=True , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = size if size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
def _snake_case ( self :Any ) -> List[str]:
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = ImageGPTImageProcessor if is_vision_available() else None
def _snake_case ( self :Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ImageGPTImageProcessingTester(self )
@property
def _snake_case ( self :Tuple ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self :str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """clusters""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
def _snake_case ( self :Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _snake_case ( self :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(__A , obj[key] ) )
else:
self.assertEqual(obj[key] , __A )
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = os.path.join(__A , """image_processor.json""" )
image_processor_first.to_json_file(__A )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_json_file(__A ).to_dict()
SCREAMING_SNAKE_CASE__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __A )
def _snake_case ( self :int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(__A )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_pretrained(__A ).to_dict()
SCREAMING_SNAKE_CASE__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__A , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __A )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def _snake_case ( self :List[Any] ) -> int:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
SCREAMING_SNAKE_CASE__ = Image.open(dataset[4]["""file"""] )
SCREAMING_SNAKE_CASE__ = Image.open(dataset[5]["""file"""] )
SCREAMING_SNAKE_CASE__ = [imagea, imagea]
return images
@require_vision
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self :Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
SCREAMING_SNAKE_CASE__ = prepare_images()
# test non-batched
SCREAMING_SNAKE_CASE__ = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
SCREAMING_SNAKE_CASE__ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , __A )
# test batched
SCREAMING_SNAKE_CASE__ = image_processing(__A , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
SCREAMING_SNAKE_CASE__ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , __A )
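# Background for the cluster arrays tested above, as a dependency-light
# sketch: ImageGPT-style preprocessing snaps every normalized pixel to the id
# of its nearest color cluster, which is how an image becomes a sequence of
# integer tokens. The palette and pixels below are toy values.
import numpy as np

clusters = np.asarray([[0.88, 0.66, 0.38], [-0.60, -0.02, 0.54]])  # toy palette
pixels = np.asarray([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])  # two normalized pixels
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
print(distances.argmin(axis=1))  # [0 1] - each pixel maps to its closest cluster id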
| 59
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
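# The module above registers its submodules lazily so that importing the
# package stays cheap. A minimal sketch of the underlying idea (simplified;
# the real _LazyModule also wires up TYPE_CHECKING and the module spec):
import importlib

class LazyAttr:
    def __init__(self, module_name, attr):
        self.module_name, self.attr = module_name, attr

    def load(self):
        # the actual import only happens when the attribute is first needed
        return getattr(importlib.import_module(self.module_name), self.attr)

json_dumps = LazyAttr("json", "dumps").load()
assert json_dumps({"ok": True}) == '{"ok": true}'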
| 59
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 59
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "timm_backbone"
def __init__( self :Union[str, Any] , __A :str=None , __A :Union[str, Any]=3 , __A :str=True , __A :Any=True , __A :Optional[Any]=None , **__A :List[str] , ) -> Tuple:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = backbone
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = features_only
SCREAMING_SNAKE_CASE__ = use_pretrained_backbone
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = out_indices if out_indices is not None else (-1,)
| 59
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 59
|
from math import pow, sqrt
def SCREAMING_SNAKE_CASE__ ( *UpperCamelCase__: float ):
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ ) > 0 and all(value > 0.0 for value in values )
return result
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ )
else ValueError("""Input Error: Molar mass values must greater than 0.""" )
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""" )
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""" )
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""" )
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float , UpperCamelCase__: float ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must be greater than 0.""" )
)
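# Note: the ratios above are meant to use the two *distinct* molar masses and
# effusion rates (the mechanical renaming collapsed their suffixes into one
# name). A worked example of Graham's law, rate_1 / rate_2 = sqrt(M_2 / M_1),
# for hydrogen (M ~ 2.016 g/mol) against oxygen (M ~ 31.998 g/mol):
from math import sqrt

ratio = round(sqrt(31.998 / 2.016), 6)
print(ratio)  # ~3.98: hydrogen effuses about four times faster than oxygen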
| 59
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 59
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase_ = "nat"
lowerCamelCase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self :List[Any] , __A :Optional[Any]=4 , __A :Any=3 , __A :Optional[int]=64 , __A :Optional[int]=[3, 4, 6, 5] , __A :Union[str, Any]=[2, 4, 8, 16] , __A :Optional[Any]=7 , __A :Optional[Any]=3.0 , __A :List[Any]=True , __A :int=0.0 , __A :Dict=0.0 , __A :Optional[Any]=0.1 , __A :str="gelu" , __A :Optional[Any]=0.0_2 , __A :Optional[int]=1E-5 , __A :Optional[int]=0.0 , __A :Optional[Any]=None , __A :Union[str, Any]=None , **__A :Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__A )
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = len(__A )
SCREAMING_SNAKE_CASE__ = num_heads
SCREAMING_SNAKE_CASE__ = kernel_size
SCREAMING_SNAKE_CASE__ = mlp_ratio
SCREAMING_SNAKE_CASE__ = qkv_bias
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = drop_path_rate
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE__ = int(embed_dim * 2 ** (len(__A ) - 1) )
SCREAMING_SNAKE_CASE__ = layer_scale_init_value
SCREAMING_SNAKE_CASE__ = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__A ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
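# A quick check of the hidden_size computed above, which doubles the embedding
# dimension at each stage (defaults: embed_dim=64 with four stages):
embed_dim, num_stages = 64, 4
print(int(embed_dim * 2 ** (num_stages - 1)))  # 512, the last stage's channel dim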
| 59
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowerCamelCase = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 59
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(UpperCamelCase__ )
EnvironmentCommand.register_subcommand(UpperCamelCase__ )
TestCommand.register_subcommand(UpperCamelCase__ )
RunBeamCommand.register_subcommand(UpperCamelCase__ )
DummyDataCommand.register_subcommand(UpperCamelCase__ )
# Parse args
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = parser.parse_known_args()
if not hasattr(UpperCamelCase__ , """func""" ):
parser.print_help()
exit(1 )
SCREAMING_SNAKE_CASE__ = parse_unknown_args(UpperCamelCase__ )
# Run
SCREAMING_SNAKE_CASE__ = args.func(UpperCamelCase__ , **UpperCamelCase__ )
service.run()
if __name__ == "__main__":
main()
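# The unknown-args helper above pairs each flag with the value that follows
# it; a quick standalone illustration (the flag names are made up):
unknown_args = ["--num_proc", "8", "--cache_dir", "/tmp/cache"]
parsed = {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
assert parsed == {"num_proc": "8", "cache_dir": "/tmp/cache"}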
| 59
| 1
|
_lowerCamelCase = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_lowerCamelCase = [{'type': 'code', 'content': INSTALL_CONTENT}]
_lowerCamelCase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 59
|
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :List[Any] , *__A :Tuple , **__A :Dict ) -> None:
"""simple docstring"""
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 59
| 1
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowerCamelCase = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: torch.nn.Module , UpperCamelCase__: BnbQuantizationConfig , UpperCamelCase__: Union[str, os.PathLike] = None , UpperCamelCase__: Optional[Dict[str, Union[int, str, torch.device]]] = None , UpperCamelCase__: Optional[List[str]] = None , UpperCamelCase__: Optional[Dict[Union[int, str], Union[int, str]]] = None , UpperCamelCase__: Optional[Union[str, os.PathLike]] = None , UpperCamelCase__: bool = False , ):
    SCREAMING_SNAKE_CASE__ = bnb_quantization_config.load_in_8bit
    SCREAMING_SNAKE_CASE__ = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
SCREAMING_SNAKE_CASE__ = []
# custom device map
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ = get_keys_to_not_convert(UpperCamelCase__ )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        SCREAMING_SNAKE_CASE__ = []
    SCREAMING_SNAKE_CASE__ = bnb_quantization_config.keep_in_fp32_modules
modules_to_not_convert.extend(UpperCamelCase__ )
# compatibility with peft
    SCREAMING_SNAKE_CASE__ = load_in_4bit
    SCREAMING_SNAKE_CASE__ = load_in_8bit
SCREAMING_SNAKE_CASE__ = get_parameter_device(UpperCamelCase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
SCREAMING_SNAKE_CASE__ = replace_with_bnb_layers(UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
        if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
            param.to(torch.float32 )
            if param.dtype != torch.float32:
                SCREAMING_SNAKE_CASE__ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
                if param is not None:
                    param.to(torch.float32 )
elif torch.is_floating_point(UpperCamelCase__ ):
param.to(UpperCamelCase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
            """We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ = replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , modules_to_not_convert=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = get_quantized_model_device_map(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , max_memory=UpperCamelCase__ , no_split_module_classes=UpperCamelCase__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCamelCase__ , offload_state_dict=UpperCamelCase__ , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(UpperCamelCase__ , device_map=UpperCamelCase__ , offload_dir=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: str=None , UpperCamelCase__: int=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
SCREAMING_SNAKE_CASE__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
    special_dtypes.update(
        {
            name: torch.float32
            for name, _ in model.named_parameters()
            if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
        } )
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = special_dtypes
SCREAMING_SNAKE_CASE__ = no_split_module_classes
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ = get_balanced_memory(
UpperCamelCase__ , low_zero=(device_map == """balanced_low_0""") , max_memory=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE__ = max_memory
SCREAMING_SNAKE_CASE__ = infer_auto_device_map(UpperCamelCase__ , **UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# check if don't have any quantized module on the cpu
        SCREAMING_SNAKE_CASE__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
SCREAMING_SNAKE_CASE__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int , UpperCamelCase__: List[Any]=None , UpperCamelCase__: List[str]=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: int=None , UpperCamelCase__: List[Any]=None , ):
SCREAMING_SNAKE_CASE__ = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ = []
current_key_name.append(UpperCamelCase__ )
if isinstance(UpperCamelCase__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ = """.""".join(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    SCREAMING_SNAKE_CASE__ = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=UpperCamelCase__ , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    SCREAMING_SNAKE_CASE__ = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
SCREAMING_SNAKE_CASE__ = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ = module.bias.data
bnb_module.requires_grad_(UpperCamelCase__ )
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_layers(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
# Create a copy of the model
with init_empty_weights():
        SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
SCREAMING_SNAKE_CASE__ = find_tied_parameters(UpperCamelCase__ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ = sum(UpperCamelCase__ , [] )
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ = False
if hasattr(UpperCamelCase__ , """base_model_prefix""" ):
SCREAMING_SNAKE_CASE__ = not hasattr(UpperCamelCase__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ = list(model.named_children() )
SCREAMING_SNAKE_CASE__ = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ = set(UpperCamelCase__ ) - set(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = list(set(UpperCamelCase__ ) ) + list(UpperCamelCase__ )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ = name.replace(UpperCamelCase__ , """""" )
filtered_module_names.append(UpperCamelCase__ )
return filtered_module_names
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
for m in model.modules():
        if isinstance(UpperCamelCase__ , bnb.nn.Linear4bit ):
return True
return False
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: nn.Module ):
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Dict ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
set_module_tensor_to_device(UpperCamelCase__ , UpperCamelCase__ , 0 , dtype=UpperCamelCase__ , value=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = param_name
SCREAMING_SNAKE_CASE__ = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ = new_module
SCREAMING_SNAKE_CASE__ = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ = False
offload_weight(module._parameters[tensor_name] , UpperCamelCase__ , UpperCamelCase__ , index=UpperCamelCase__ )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , UpperCamelCase__ , index=UpperCamelCase__ , )
else:
offload_weight(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , index=UpperCamelCase__ )
offload_weight(UpperCamelCase__ , param_name.replace("""weight""" , """SCB""" ) , UpperCamelCase__ , index=UpperCamelCase__ )
set_module_tensor_to_device(UpperCamelCase__ , UpperCamelCase__ , """meta""" , dtype=UpperCamelCase__ , value=torch.empty(*param.size() ) )
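# The replacement pass above recursively walks named_children and swaps
# nn.Linear layers in place. A dependency-light sketch of that traversal
# pattern, with a plain Identity standing in for a bitsandbytes layer:
import torch.nn as nn

def replace_linears(module):
    for name, child in module.named_children():
        if isinstance(child, nn.Linear):
            setattr(module, name, nn.Identity())  # stand-in for the quantized linear
        else:
            replace_linears(child)  # recurse into nested submodules

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Sequential(nn.Linear(4, 2)))
replace_linears(model)
print(model)  # both Linear layers are now Identity placeholders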
| 59
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@require_torch
def _snake_case ( self :Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
SCREAMING_SNAKE_CASE__ = load_dataset("""ashraq/esc50""" )
SCREAMING_SNAKE_CASE__ = dataset["""train"""]["""audio"""][-1]["""array"""]
        SCREAMING_SNAKE_CASE__ = audio_classifier(__A , candidate_labels=["""Sound of a dog""", """Sound of vacuum cleaner"""] )
self.assertEqual(
nested_simplify(__A ) , [{"""score""": 0.5_0_1, """label""": """Sound of a dog"""}, {"""score""": 0.4_9_9, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def _snake_case ( self :Dict ) -> List[str]:
"""simple docstring"""
pass
@slow
@require_torch
def _snake_case ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ = load_dataset("""ashraq/esc50""" )
SCREAMING_SNAKE_CASE__ = dataset["""train"""]["""audio"""][-1]["""array"""]
        SCREAMING_SNAKE_CASE__ = audio_classifier(__A , candidate_labels=["""Sound of a dog""", """Sound of vacuum cleaner"""] )
self.assertEqual(
nested_simplify(__A ) , [
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
] , )
        SCREAMING_SNAKE_CASE__ = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vacuum cleaner"""] )
self.assertEqual(
nested_simplify(__A ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ = audio_classifier(
            [audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vacuum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(__A ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def _snake_case ( self :str ) -> Optional[int]:
"""simple docstring"""
pass
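# A minimal usage sketch of the pipeline exercised above (the audio array is
# illustrative; running this downloads the model weights):
from transformers import pipeline
import numpy as np

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"]))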
| 59
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ ( metaclass=UpperCamelCase__ ):
lowerCamelCase_ = ["note_seq"]
def __init__( self :Any , *__A :Union[str, Any] , **__A :Optional[Any] ) -> Any:
"""simple docstring"""
requires_backends(self , ["""note_seq"""] )
@classmethod
def _snake_case ( cls :Union[str, Any] , *__A :Optional[int] , **__A :Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ["""note_seq"""] )
@classmethod
def _snake_case ( cls :List[str] , *__A :str , **__A :Optional[int] ) -> Dict:
"""simple docstring"""
requires_backends(cls , ["""note_seq"""] )
| 59
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowerCamelCase = data_utils.TransfoXLTokenizer
_lowerCamelCase = data_utils.TransfoXLCorpus
_lowerCamelCase = data_utils
_lowerCamelCase = data_utils
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Any , UpperCamelCase__: Any , UpperCamelCase__: Tuple ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCamelCase__ , """rb""" ) as fp:
SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ , encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' )
SCREAMING_SNAKE_CASE__ = corpus.vocab.__dict__
torch.save(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(f'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
SCREAMING_SNAKE_CASE__ = os.path.abspath(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = os.path.abspath(UpperCamelCase__ )
print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
SCREAMING_SNAKE_CASE__ = TransfoXLConfig()
else:
SCREAMING_SNAKE_CASE__ = TransfoXLConfig.from_json_file(UpperCamelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
SCREAMING_SNAKE_CASE__ = TransfoXLLMHeadModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = load_tf_weights_in_transfo_xl(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
print(f'''Save PyTorch model to {os.path.abspath(UpperCamelCase__ )}''' )
torch.save(model.state_dict() , UpperCamelCase__ )
print(f'''Save configuration file to {os.path.abspath(UpperCamelCase__ )}''' )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
        'An optional config json file corresponding to the pre-trained Transformer-XL model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
    help='An optional dataset file to be converted into a vocabulary.',
)
_lowerCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
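# Example invocations for the converter above (the script file name and paths
# are hypothetical placeholders):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl \
#       --transfo_xl_dataset_file ./corpus-info.pkl
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json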
| 59
| 1
|
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
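# Why the bitmask works: the least-significant bit of an integer is set
# exactly when the number is odd, so `n & 1 == 0` tests evenness, negatives
# included (two's complement). For example:
#     >>> [(n, n & 1 == 0) for n in (-2, -1, 0, 3)]
#     [(-2, True), (-1, False), (0, True), (3, False)]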
| 59
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str ):
def get_masked_lm_array(UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_array(UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_layer_array(UpperCamelCase__: int , UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_attention_layer_array(UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = array.reshape(UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
print(f'''Loading model based on config from {config_path}...''' )
SCREAMING_SNAKE_CASE__ = BertConfig.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = BertForMaskedLM(UpperCamelCase__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ = model.bert.encoder.layer[layer_index]
# Self-attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_query_dense/bias""" , self_attn.query.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_key_dense/bias""" , self_attn.key.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
SCREAMING_SNAKE_CASE__ = layer.attention.output
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_output_dense/bias""" , self_output.dense.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_attention_layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_attention_layer_norm/beta""" )
# Intermediate
SCREAMING_SNAKE_CASE__ = layer.intermediate
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_intermediate_dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_intermediate_dense/bias""" )
# Output
SCREAMING_SNAKE_CASE__ = layer.output
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_dense/bias""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_layer_norm/beta""" )
# Embeddings
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_position_embedding_layer/embeddings""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_type_embedding_layer/embeddings""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_embedding_norm_layer/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
SCREAMING_SNAKE_CASE__ = model.cls.predictions.transform
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""dense/bias""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""layer_norm/beta""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""embedding_table""" )
# Pooling
SCREAMING_SNAKE_CASE__ = BertPooler(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_pooler_layer/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(UpperCamelCase__ )
# Integration test - should load without any errors ;)
SCREAMING_SNAKE_CASE__ = BertForMaskedLM.from_pretrained(UpperCamelCase__ )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowerCamelCase = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
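# Example invocation for the converter above (the script file name and paths
# are hypothetical placeholders):
#
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./token_dropping_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./bert-token-dropping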
| 59
| 1
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase_ ( unittest.TestCase , UpperCamelCase__ ):
def _snake_case ( self :Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_tool("""text-to-speech""" )
self.tool.setup()
def _snake_case ( self :Optional[Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = self.tool("""hey""" )
SCREAMING_SNAKE_CASE__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = self.tool("""hey""" )
SCREAMING_SNAKE_CASE__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 59
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowerCamelCase = '\\n Text data.\n Second line of data.'
_lowerCamelCase = 'file'
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
SCREAMING_SNAKE_CASE__ = bytes(UpperCamelCase__ , """utf-8""" )
with zstd.open(UpperCamelCase__ , """wb""" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
with open(os.path.join(tmpfs.local_root_dir , UpperCamelCase__ ) , """w""" ) as f:
f.write(UpperCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Dict , UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
SCREAMING_SNAKE_CASE__ = input_paths[compression_format]
SCREAMING_SNAKE_CASE__ = tmp_path / """cache"""
SCREAMING_SNAKE_CASE__ = DownloadConfig(cache_dir=UpperCamelCase__ , extract_compressed_file=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
with open(UpperCamelCase__ ) as f:
SCREAMING_SNAKE_CASE__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = """custom_cache"""
SCREAMING_SNAKE_CASE__ = """custom_extracted_dir"""
SCREAMING_SNAKE_CASE__ = tmp_path / """custom_extracted_path"""
if default_extracted:
SCREAMING_SNAKE_CASE__ = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , UpperCamelCase__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE__ = xz_file
SCREAMING_SNAKE_CASE__ = (
DownloadConfig(extract_compressed_file=UpperCamelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCamelCase__ )
)
SCREAMING_SNAKE_CASE__ = cached_path(UpperCamelCase__ , download_config=UpperCamelCase__ )
assert Path(UpperCamelCase__ ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( ):
with pytest.raises(UpperCamelCase__ ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
http_get("""https://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
ftp_get("""ftp://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int ):
SCREAMING_SNAKE_CASE__ = tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(UpperCamelCase__ ):
fsspec_get("""s3://huggingface.co""" , temp_file=UpperCamelCase__ )
with pytest.raises(UpperCamelCase__ ):
fsspec_head("""s3://huggingface.co""" )
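# Non-test usage sketch (the paths below are hypothetical): the same cache
# machinery the tests above exercise, extracting a local gzip archive through
# cached_path instead of going through pytest fixtures.
#   config = DownloadConfig(cache_dir="/tmp/hf_cache", extract_compressed_file=True)
#   extracted = cached_path("/path/to/archive.txt.gz", download_config=config)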
| 59
| 1
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = F'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 59
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCamelCase = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
parser.add_argument(
"""--dataset_name""" , type=UpperCamelCase__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=UpperCamelCase__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=UpperCamelCase__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=UpperCamelCase__ , default=1_000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=UpperCamelCase__ , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=UpperCamelCase__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=UpperCamelCase__ , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
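# A minimal read-back sketch (not part of the upstream script): parsing one of
# the written shards. `max_length` here is an assumption and must match the
# --max_length value used when the shards were created.
def decode_fn(serialized_example, max_length=512):
    feature_description = {
        "input_ids": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
    }
    return tf.io.parse_single_example(serialized_example, feature_description)
# Example: tf.data.TFRecordDataset(["train/dataset-0-1000.tfrecord"]).map(decode_fn)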
| 59
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class UpperCamelCase_(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
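if __name__ == "__main__":
    # Minimal usage sketch: the class above corresponds to GPTBigCodeConfig
    # upstream. `attribute_map` lets the canonical config names resolve to the
    # GPT-2 style attributes set in __init__.
    config = UpperCamelCase_(n_layer=24, multi_query=True)
    print(config.num_hidden_layers, config.hidden_size)  # 24 768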
| 59
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
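if __name__ == "__main__":
    # Minimal usage sketch with made-up values: three vehicles scored on price
    # and mileage (weight 0 -> lower is better) and year (weight 1 -> higher is
    # better). Each row gets its combined score appended in place.
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))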
| 59
| 1
|
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
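# Sanity-check sketch (illustrative numbers): a 5 s clip at 16 kHz cut to a
# random 1 s window keeps exactly sample_rate * max_length samples, i.e.
#   random_subsample(np.zeros(5 * 16_000), max_length=1.0).shape == (16_000,)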
@dataclass
class DataTrainingArguments:
lowerCamelCase_ = field(default=UpperCamelCase__ , metadata={"help": "Name of a dataset from the datasets package"} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "A file containing the training audio paths and labels."} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "A file containing the validation audio paths and labels."} )
lowerCamelCase_ = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
lowerCamelCase_ = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
lowerCamelCase_ = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
lowerCamelCase_ = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
lowerCamelCase_ = field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class ModelArguments:
lowerCamelCase_ = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
lowerCamelCase_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Name or path of preprocessor config."} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def SCREAMING_SNAKE_CASE__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--label_column_name` to the correct text column - one of """
f'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )
    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE__ = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ = last_checkpoint
SCREAMING_SNAKE_CASE__ = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE__ = trainer.evaluate()
trainer.log_metrics("""eval""" , UpperCamelCase__ )
trainer.save_metrics("""eval""" , UpperCamelCase__ )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE__ = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
if __name__ == "__main__":
main()
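# Example invocation sketch (dataset and model names follow the upstream
# audio-classification example and may need adjusting):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval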
| 59
|
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    """Decorator flagging a callable as experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
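if __name__ == "__main__":
    # Minimal usage sketch (the decorated function is hypothetical): each call
    # emits a UserWarning flagging the API as experimental.
    @experimental
    def ping() -> str:
        return "pong"

    print(ping())  # warns, then prints "pong"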
| 59
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_lowerCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowerCamelCase = {
'unc-nlp/lxmert-base-uncased': 512,
}
_lowerCamelCase = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class UpperCamelCase_(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
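# Usage sketch (downloads from the Hub; the class above corresponds to
# LxmertTokenizerFast upstream):
#   tokenizer = UpperCamelCase_.from_pretrained("unc-nlp/lxmert-base-uncased")
#   tokenizer("Who is sitting on the couch?")  # adds [CLS] ... [SEP] automatically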
| 59
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase_(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 59
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'sentencepiece.bpe.model'}
_lowerCamelCase = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
_lowerCamelCase = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
_lowerCamelCase = '▁'
class UpperCamelCase_(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
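# Usage sketch (requires a real SentencePiece model file; the class above
# corresponds to BarthezTokenizer upstream):
#   tokenizer = UpperCamelCase_("sentencepiece.bpe.model")
#   ids = tokenizer("Le chat dort.")["input_ids"]  # <s> ... </s> added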
| 59
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
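# Worked example (the key below is illustrative): with a patch-embedding offset
# of 1,
#   replace_key_with_offset("encoder.1.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# returns "encoder.block.0.3.output.conv1.weight".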
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = PoolFormerConfig()
# set attributes based on model_name
SCREAMING_SNAKE_CASE__ = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ = model_name[-3:]
SCREAMING_SNAKE_CASE__ = 1_000
SCREAMING_SNAKE_CASE__ = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE__ = (1, 1_000)
# set config attributes
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ = idalabel
SCREAMING_SNAKE_CASE__ = {v: k for k, v in idalabel.items()}
if size == "s12":
SCREAMING_SNAKE_CASE__ = [2, 2, 6, 2]
SCREAMING_SNAKE_CASE__ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "s24":
SCREAMING_SNAKE_CASE__ = [4, 4, 12, 4]
SCREAMING_SNAKE_CASE__ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "s36":
SCREAMING_SNAKE_CASE__ = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE__ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9
elif size == "m36":
SCREAMING_SNAKE_CASE__ = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE__ = [96, 192, 384, 768]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9_5
elif size == "m48":
SCREAMING_SNAKE_CASE__ = [8, 8, 24, 8]
SCREAMING_SNAKE_CASE__ = [96, 192, 384, 768]
SCREAMING_SNAKE_CASE__ = 4.0
SCREAMING_SNAKE_CASE__ = 1e-6
SCREAMING_SNAKE_CASE__ = 0.9_5
else:
raise ValueError(f'''Size {size} not supported''' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_lowerCamelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
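
A usage sketch, not part of the original script: assuming it is saved as convert_poolformer_original_to_pytorch.py and the original s12 checkpoint has been downloaded to a local placeholder path, the conversion would be invoked roughly like this:

    python convert_poolformer_original_to_pytorch.py \
        --model_name poolformer_s12 \
        --checkpoint_path ./poolformer_s12.pth.tar \
        --pytorch_dump_folder_path ./poolformer_s12_converted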
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
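
A run sketch, assuming the file above is saved as test_processor_bark.py in a transformers checkout (the path is an assumption): @slow tests are skipped unless the RUN_SLOW environment variable is set.

    python -m pytest test_processor_bark.py -v
    RUN_SLOW=1 python -m pytest test_processor_bark.py -v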
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
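
A short composition sketch for the three classes above (default values only; no weights are downloaded, and the tiny layer counts are purely illustrative):

    from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

    text_config = Pix2StructTextConfig(num_layers=2)          # deliberately tiny, for illustration
    vision_config = Pix2StructVisionConfig(num_hidden_layers=2)
    config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
    # token ids are propagated from the text sub-config to the composite config
    assert config.decoder_start_token_id == text_config.decoder_start_token_id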
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_megatron_bert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_megatron_bert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_megatron_bert_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_megatron_bert_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_megatron_bert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_megatron_bert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_megatron_bert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
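
As a side note, the nested loop above can be collapsed into a single vectorized assertion; a sketch using the same names (output, expected, TOLERANCE):

    expected_tensor = torch.tensor(expected, device=output.device, dtype=output.dtype).reshape(3, 3)
    assert torch.allclose(output[0, :3, :3], expected_tensor, rtol=TOLERANCE, atol=TOLERANCE)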
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
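
For intuition, a conceptual sketch of what pad_across_processes guarantees here (hypothetical two-rank run; not runnable in a single process):

    # rank 0: tensor.shape == (2, 10); rank 1: tensor.shape == (3, 10)
    # after accelerator.pad_across_processes(tensor): both ranks hold shape (3, 10)
    # default: rank 0's trailing row is zero-filled; with pad_first=True the leading
    # row is zero-filled instead, which is exactly what the checks above verify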
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
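
A sketch of how such a pinned-versions table is typically consumed at packaging time (the helper below is illustrative, not necessarily the library's own API):

    def deps_list(*pkgs):
        # Resolve bare package names to their pinned requirement strings.
        return [deps[pkg] for pkg in pkgs]

    install_requires = deps_list("numpy", "tokenizers", "tqdm")
    # -> ['numpy>=1.17', 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'tqdm>=4.27']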
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
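
A usage sketch of the lazy-module pattern above (assuming a standard transformers install with torch available): the heavy submodule is only imported when its attribute is first accessed.

    from transformers.models.layoutlmv3 import LayoutLMv3Config  # resolved lazily via _LazyModule
    config = LayoutLMv3Config()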
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
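
Quick usage sketch (values computed from the formulas above, with exactly one argument zeroed out to select the unknown):

    >>> ind_reactance(35e-3, 1e3, 0)
    {'reactance': 219.9114857512855}
    >>> ind_reactance(0, 10e3, 50)
    {'inductance': 0.0007957747154594767}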
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
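
Run sketch (the path is an assumption about where this file lives in a diffusers source checkout):

    python -m pytest tests/others/test_dependencies.py -v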
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
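
Typical invocations of this entry point (sketch; the available subcommands are exactly those registered above):

    datasets-cli env
    datasets-cli test ./my_dataset --save_infos --all_configs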
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
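
An end-to-end usage sketch (hedged: the checkpoint id follows the RePaint example in the diffusers docs, the image and mask paths are placeholders, and per _preprocess_mask above white mask pixels mark regions to keep while black pixels are inpainted):

    from diffusers import RePaintPipeline, RePaintScheduler
    from PIL import Image

    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)

    original = Image.open("face_256.png").convert("RGB")  # placeholder 256x256 input
    mask = Image.open("mask_256.png").convert("RGB")      # placeholder mask
    result = pipe(image=original, mask_image=mask, num_inference_steps=250)
    result.images[0].save("inpainted.png")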
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
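
A small usage sketch of the toy BPE vocabulary above outside the test harness (vocab_file and merges_file stand for the temporary files that setUp writes):

    tokenizer = OpenAIGPTTokenizer(vocab_file, merges_file)
    tokenizer.tokenize("lower")                 # ['low', 'er</w>']
    tokenizer.convert_tokens_to_ids(["<unk>"])  # [20]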