'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
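# Illustrative usage of the lazy module above (a sketch; `_LazyModule` defers the
# heavy `modeling_ernie` import until an attribute is first accessed):
#
#     from transformers import ErnieConfig, ErnieModel  # resolved lazily
#
#     config = ErnieConfig()
#     model = ErnieModel(config)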
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
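# Illustrative usage (a sketch; "YituTech/conv-bert-base" is one of the checkpoints
# listed in PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tokenizer("Hello world")
#     # token_type_ids for a single sequence are all zeros, as built by
#     # create_token_type_ids_from_sequences above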
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
HfFolder.save_token(a__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(text_file), path_in_repo="data/text_data.txt", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_csv_with_dir_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_image_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
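# Illustrative use in a test (a sketch; the test body and the `load_dataset` call are
# hypothetical -- the fixture yields the repo id of a freshly created private repo):
#
#     def test_load_private_text(hf_private_dataset_repo_txt_data, hf_token):
#         ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)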
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
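# Illustrative usage (a sketch; "openai/clip-vit-base-patch32" is a public CLIP
# checkpoint, and `cat.png` is a hypothetical local image):
#
#     from transformers import CLIPProcessor
#     from PIL import Image
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#     # inputs now holds input_ids, attention_mask and pixel_values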
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    """Constructs an InstructBLIP processor wrapping a BLIP image processor, an LLM tokenizer and a Q-Former tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwrite to save the Q-Former tokenizer in a separate folder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwrite to load the Q-Former tokenizer from a separate folder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    """FiLM-conditioned T5-style decoder used for spectrogram diffusion."""

    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask)[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    """One decoder block: conditional self-attention, cross-attention and a FiLM-conditioned MLP."""

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """Self-attention block whose pre-norm output is FiLM-modulated by the conditioning embedding."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention block with residual connection."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """FiLM-conditioned feed-forward block with residual connection."""

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """Gated GELU feed-forward: two parallel input projections, one gated by the activation."""

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm (scale only, no shift)."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of GELU, as used in the original BERT/GPT code."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM layer: feature-wise affine modulation conditioned on an embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
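# Minimal smoke test for the FiLM layer above (a sketch; shapes are arbitrary).
# FiLM predicts a per-feature (scale, shift) pair from the conditioning embedding
# and applies x * (1 + scale) + shift:
#
#     film = TaFiLMLayer(in_features=32, out_features=8)
#     x = torch.randn(2, 5, 8)       # (batch, seq, features)
#     cond = torch.randn(2, 1, 32)   # conditioning embedding, d_model * 4 wide
#     assert film(x, cond).shape == (2, 5, 8)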
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance):
    """Relax the outgoing edges of v and update the best known crossing distance."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source, destination, graph_forward, graph_backward):
    """Bi-directional Dijkstra: search from both ends, stop when the frontiers meet."""
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )

        # once the two frontiers' costs cross the best known distance, we can stop
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
graph_bwd = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
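# Illustrative call (a sketch, using the graphs defined above): the shortest
# E -> F distance is 3, via E -> G (2) -> F (1), which beats E -> B -> C -> D -> F (4):
#
#     print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3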
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument("--train_data", type=str, required=True, help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.")
        train_parser.add_argument("--column_label", type=int, default=0, help="Column of the dataset csv file with example labels.")
        train_parser.add_argument("--column_text", type=int, default=1, help="Column of the dataset csv file with example texts.")
        train_parser.add_argument("--column_id", type=int, default=2, help="Column of the dataset csv file with example ids.")
        train_parser.add_argument("--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers).")
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument("--validation_split", type=float, default=0.1, help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.")
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument("--task", type=str, default="text_classification", help="Task to train the model on.")
        train_parser.add_argument("--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model.")
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the TensorFlow checkpoint's weights into our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size}, size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
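# Illustrative CLI invocation (a sketch; the script file name and all paths are hypothetical):
#
#     python convert_mobilenet_v1_original_tf_checkpoint_to_pytorch.py \
#         --model_name mobilenet_v1_1.0_224 \
#         --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#         --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf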
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag to mark arrow keys, which arrive as escape sequences
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from stdin, handling Windows escape sequences and POSIX raw mode."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
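# Illustrative usage (a sketch; requires an interactive terminal). Arrow keys come
# back as chr(base_code + ARROW_KEY_FLAG), so they can be compared against KEYMAP:
#
#     key = get_character()
#     if key == chr(KEYMAP["up"]):
#         print("arrow up")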
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                # skip the Adam optimizer's slot variables
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
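# Illustrative CLI invocation (a sketch; the script file name and paths are hypothetical):
#
#     python convert_gptsan_tf_checkpoint_to_pytorch.py \
#         --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_pytorch_model.pt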
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
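# Illustrative instantiation (a sketch; values mirror the defaults above). Note that
# a lower-case padding strategy is normalized to upper-case before validation:
#
#     config = BitConfig(layer_type="preactivation", global_padding="same")
#     assert config.global_padding == "SAME"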
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Returns the sum of the digits of the number 2 ** power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
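# Quick sanity checks (worked examples): 2**10 = 1024 and 1 + 0 + 2 + 4 = 7,
# while 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26:
#
#     assert solution(10) == 7
#     assert solution(15) == 26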
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph contains a cycle."""
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursive DFS that reports a back edge, i.e. a cycle."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
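# Illustrative usage (a sketch): the first graph has a back edge 2 -> 0, the second
# does not, so only the first contains a cycle:
#
#     assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
#     assert check_cycle({0: [1], 1: [2], 2: []}) is False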
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
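# Illustrative composition (a sketch) mirroring the classmethod above; the Q-Former's
# encoder_hidden_size is tied to the vision tower's hidden size in __init__:
#
#     vision = Blip2VisionConfig()
#     qformer = Blip2QFormerConfig()
#     text = CONFIG_MAPPING["opt"]()
#     config = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text)
#     assert config.qformer_config.encoder_hidden_size == vision.hidden_size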
'''simple docstring'''
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
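# Expected output of main() above (a worked example): the words starting with "de"
# come back with a trailing space, because END ("#") maps to " " in _elements:
#
#     ('depart ', 'detergent ', 'deer ', 'deal ')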
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
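# Illustrative round trip with the tables above:
#   encrypt("SOS")          -> "... --- ..."
#   decrypt("... --- ...")  -> "SOS"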
| 58
| 1
|
'''simple docstring'''
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase_ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =find_backend(' if not is_torch_available():' )
self.assertEqual(__A , 'torch' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_lowerCAmelCase =find_backend(' if not (is_torch_available() and is_transformers_available()):' )
self.assertEqual(__A , 'torch_and_transformers' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_lowerCAmelCase =find_backend(
' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
self.assertEqual(__A , 'torch_and_transformers_and_onnx' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' , __A )
self.assertIn('torch_and_transformers' , __A )
self.assertIn('flax_and_transformers' , __A )
self.assertIn('torch_and_transformers_and_onnx' , __A )
# Likewise, we can't assert on the exact content of a key
self.assertIn('UNet2DModel' , objects['torch'] )
self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(__A , '\nCONSTANT = None\n' )
_lowerCAmelCase =create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
__A , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
_lowerCAmelCase ='\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
_lowerCAmelCase =create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(__A , __A )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase ='# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
_lowerCAmelCase =create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , __A )
| 58
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
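# For the default (non multiple-choice) task the property above resolves to (sketch):
#   OrderedDict([("input_ids",      {0: "batch", 1: "sequence"}),
#                ("attention_mask", {0: "batch", 1: "sequence"})])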
| 58
| 1
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each bucket's contents back into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to the next digit
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
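# Illustrative trace of the LSD radix sort above, assuming RADIX == 10. Each pass
# buckets by one digit (least significant first) and re-concatenates the buckets:
#   [170, 45, 75, 90, 802]
#   ones digit     -> [170, 90, 802, 45, 75]
#   tens digit     -> [802, 45, 170, 75, 90]
#   hundreds digit -> [45, 75, 90, 170, 802]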
| 58
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# text-to-image
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
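# Usage pattern in the tests above (sketch): call the reset helper before each pipeline
# stage so torch.cuda.max_memory_allocated() reflects only that stage's peak usage.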
| 58
| 1
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply FiLM conditional feed-forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm that only scales and doesn't shift, also known as Root Mean
# Square Layer Normalization (https://arxiv.org/abs/1910.07467); the variance is
# therefore calculated without the mean and there is no bias. Additionally, we make
# sure that the accumulation for half-precision inputs is done in fp32.
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
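# Minimal standalone reference for the RMS normalization above (illustrative, not part
# of the original module): scale by 1/sqrt(mean(x^2)) with fp32 accumulation, no bias.
def _rms_norm_reference(hidden_states, weight, eps=1e-6):
    # accumulate the squared mean in float32 for numerical stability
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    # cast back to half precision if the learned scale is half precision
    if weight.dtype in (torch.float16, torch.bfloat16):
        hidden_states = hidden_states.to(weight.dtype)
    return weight * hidden_states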
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
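# Numeric sanity check for the tanh GELU approximation above (illustrative; writing
# gelu for the activation's forward pass):
#   gelu(0.0) == 0.0, gelu(1.0) ≈ 0.8412, gelu(-1.0) ≈ -0.1588
# versus ~0.8413 at x = 1.0 for the exact erf-based GELU.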
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
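# Sketch of the FiLM modulation implemented above (hypothetical shapes): the conditioning
# embedding is projected to 2 * out_features and split into per-channel scale/shift pairs:
#   scale, shift = torch.chunk(scale_bias(conditioning_emb), 2, dim=-1)
#   x = x * (1 + scale) + shift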
| 58
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
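# The classic 0/1 knapsack recurrence the tests above exercise (sketch):
#   K(i, c) = max(K(i-1, c), v[i-1] + K(i-1, c - w[i-1]))  if w[i-1] <= c else K(i-1, c)
# e.g. capacity 50 with values [60, 100, 120] / weights [10, 20, 30] -> 220 (items 2 + 3).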
| 58
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : Tuple = BlenderbotSmallConfig
lowercase : List[Any] = {}
lowercase : Tuple = 'gelu'
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=20 , __A=2 , __A=1 , __A=0 , ) -> Dict:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =bos_token_id
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCAmelCase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase =tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCAmelCase =prepare_blenderbot_small_inputs_dict(__A , __A , __A )
return config, inputs_dict
def UpperCamelCase__ ( self , __A , __A ) -> List[Any]:
_lowerCAmelCase =TFBlenderbotSmallModel(config=__A ).get_decoder()
_lowerCAmelCase =inputs_dict['input_ids']
_lowerCAmelCase =input_ids[:1, :]
_lowerCAmelCase =inputs_dict['attention_mask'][:1, :]
_lowerCAmelCase =inputs_dict['head_mask']
_lowerCAmelCase =1
# first forward pass
_lowerCAmelCase =model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
_lowerCAmelCase , _lowerCAmelCase =outputs.to_tuple()
# create hypothetical next tokens and extend next_input_ids
_lowerCAmelCase =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append the new tokens to input_ids and the attention mask
_lowerCAmelCase =tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCAmelCase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCAmelCase =model(__A , attention_mask=__A )[0]
_lowerCAmelCase =model(__A , attention_mask=__A , past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCAmelCase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCAmelCase =output_from_no_past[:, -3:, random_slice_idx]
_lowerCAmelCase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A , __A , rtol=1E-3 )
def UpperCamelCase__ ( a__ , a__ , a__ , a__=None , a__=None , a__=None , a__=None , a__=None , ):
'''simple docstring'''
if attention_mask is None:
_lowerCAmelCase =tf.cast(tf.math.not_equal(a__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowerCAmelCase =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowerCAmelCase =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCAmelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCAmelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
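# Sketch of the default decoder_attention_mask built above: position 0 (the start token)
# is always attended to, later positions are masked wherever they equal pad_token_id,
# e.g. pad_token_id=0 and decoder_input_ids=[[2, 5, 0, 7]] -> [[1, 1, 0, 1]].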
@require_tf
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[str] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
lowercase : Optional[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
lowercase : Union[str, Any] = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase : List[str] = True
lowercase : Optional[Any] = False
lowercase : Any = False
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =TFBlenderbotSmallModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A )
def UpperCamelCase__ ( self ) -> str:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
lowercase : Any = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
lowercase : Any = 'facebook/blenderbot_small-90M'
@cached_property
def UpperCamelCase__ ( self ) -> Tuple:
# use the "old" tokenizer here because of a bug when downloading the new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.tokenizer(self.src_text , return_tensors='tf' )
_lowerCAmelCase =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__A , )
_lowerCAmelCase =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__A )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 58
|
'''simple docstring'''
lowercase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 1
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 768 , ) -> Tuple:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.zeros(1 , __A ) )
_lowerCAmelCase =nn.Parameter(torch.ones(1 , __A ) )
def UpperCamelCase__ ( self , __A = None , __A = None , ) -> int:
_lowerCAmelCase =nn.Parameter(self.mean.to(__A ).to(__A ) )
_lowerCAmelCase =nn.Parameter(self.std.to(__A ).to(__A ) )
return self
def UpperCamelCase__ ( self , __A ) -> str:
_lowerCAmelCase =(embeds - self.mean) * 1.0 / self.std
return embeds
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =(embeds * self.std) + self.mean
return embeds
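# Round-trip property of the normalizer above (sketch): the two transforms are exact
# inverses up to floating-point error, since scaling computes (embeds - mean) / std
# and unscaling computes embeds * std + mean.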
| 58
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# guard against the model hanging on generate, which can indicate a bad saved config
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # fails if the model hangs on generate (possibly a bad saved config)
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 1
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ = '''src/diffusers'''
lowercase_ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ = spec.loader.load_module()
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
return line.startswith(a__ ) or len(a__ ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , a__ ) is not None
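# Lines the helper above treats as "still inside the block" (illustrative):
#   "    x = 1"              -> starts with the current indent
#   ""                       -> len(line) <= 1
#   "    ) -> torch.Tensor:" -> matches the closing-parenthesis regex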
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =object_name.split('.' )
_lowerCAmelCase =0
# First let's find the module where our object lives.
_lowerCAmelCase =parts[i]
while i < len(a__ ) and not os.path.isfile(os.path.join(a__ , F'''{module}.py''' ) ):
i += 1
if i < len(a__ ):
_lowerCAmelCase =os.path.join(a__ , parts[i] )
if i >= len(a__ ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(a__ , F'''{module}.py''' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase =f.readlines()
# Now let's find the class / func in the code!
_lowerCAmelCase =''
_lowerCAmelCase =0
for name in parts[i + 1 :]:
while (
line_index < len(a__ ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += "    "  # nested definitions in this repo are indented by four spaces
line_index += 1
if line_index >= len(a__ ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_lowerCAmelCase =line_index
while line_index < len(a__ ) and _should_continue(lines[line_index] , a__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCAmelCase =lines[start_index:line_index]
return "".join(a__ )
lowercase_ = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowercase_ = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowercase_ = re.compile(r'''<FILL\s+[^>]*>''')
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =code.split('\n' )
_lowerCAmelCase =0
while idx < len(a__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(a__ ):
return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(get_indent(a__ ) ) > 0
if has_indent:
_lowerCAmelCase =F'''class Bla:\n{code}'''
_lowerCAmelCase =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=a__ )
_lowerCAmelCase =black.format_str(a__ , mode=a__ )
_lowerCAmelCase , _lowerCAmelCase =style_docstrings_in_code(a__ )
return result[len('class Bla:\n' ) :] if has_indent else result
def UpperCamelCase__ ( a__ , a__=False ):
'''simple docstring'''
with open(a__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase =f.readlines()
_lowerCAmelCase =[]
_lowerCAmelCase =0
# Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(a__ ):
_lowerCAmelCase =_re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =search.groups()
_lowerCAmelCase =find_code_in_diffusers(a__ )
_lowerCAmelCase =get_indent(a__ )
_lowerCAmelCase =line_index + 1 if indent == theoretical_indent else line_index + 2
_lowerCAmelCase =theoretical_indent
_lowerCAmelCase =start_index
# Loop to check the observed code; stop when indentation diminishes or when we see an `# End copy` comment.
_lowerCAmelCase =True
while line_index < len(a__ ) and should_continue:
line_index += 1
if line_index >= len(a__ ):
break
_lowerCAmelCase =lines[line_index]
_lowerCAmelCase =_should_continue(a__ , a__ ) and re.search(F'''^{indent}# End copy''' , a__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCAmelCase =lines[start_index:line_index]
_lowerCAmelCase =''.join(a__ )
# Remove any nested `Copied from` comments to avoid circular copies
_lowerCAmelCase =[line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(a__ ) is None]
_lowerCAmelCase ='\n'.join(a__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(a__ ) > 0:
_lowerCAmelCase =replace_pattern.replace('with' , '' ).split(',' )
_lowerCAmelCase =[_re_replace_pattern.search(a__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =pattern.groups()
_lowerCAmelCase =re.sub(a__ , a__ , a__ )
if option.strip() == "all-casing":
_lowerCAmelCase =re.sub(obja.lower() , obja.lower() , a__ )
_lowerCAmelCase =re.sub(obja.upper() , obja.upper() , a__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_lowerCAmelCase =blackify(lines[start_index - 1] + theoretical_code )
_lowerCAmelCase =theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_lowerCAmelCase =lines[:start_index] + [theoretical_code] + lines[line_index:]
_lowerCAmelCase =start_index + 1
if overwrite and len(a__ ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(a__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(a__ )
return diffs
def UpperCamelCase__ ( a__ = False ):
'''simple docstring'''
_lowerCAmelCase =glob.glob(os.path.join(a__ , '**/*.py' ) , recursive=a__ )
_lowerCAmelCase =[]
for filename in all_files:
_lowerCAmelCase =is_copy_consistent(a__ , a__ )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(a__ ) > 0:
_lowerCAmelCase ='\n'.join(a__ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowercase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
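# Example of the marker this script enforces (illustrative object path):
#   # Copied from diffusers.models.attention.CrossAttention with CrossAttention->MyAttention
# The optional "with A->B,..." suffix feeds the _re_replace_pattern substitutions, and
# appending "all-casing" also rewrites the lower- and upper-cased variants of each pair.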
| 58
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
| 58
| 1
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowercase_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A ) -> Tuple:
super().__init__()
_lowerCAmelCase =torchvision.models.resnetaaa(pretrained=__A )
_lowerCAmelCase =list(model.children() )[:-2]
_lowerCAmelCase =nn.Sequential(*__A )
_lowerCAmelCase =nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCamelCase__ ( self , __A ) -> List[Any]:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
_lowerCAmelCase =self.pool(self.model(__A ) )
_lowerCAmelCase =torch.flatten(__A , start_dim=2 )
_lowerCAmelCase =out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ) -> Optional[Any]:
    self.data = [json.loads(l ) for l in open(data_path )]
    self.data_dir = os.path.dirname(data_path )
    self.tokenizer = tokenizer
    self.labels = labels
    self.n_classes = len(labels )
    self.max_seq_length = max_seq_length
    self.transforms = transforms
def __len__( self ) -> Tuple:
return len(self.data )
def __getitem__( self , index ) -> Dict:
    sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=True ) )
    start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
    sentence = sentence[: self.max_seq_length]
    label = torch.zeros(self.n_classes )
    label[[self.labels.index(tgt ) for tgt in self.data[index]['label']]] = 1
    image = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
    image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCamelCase__ ( self ) -> List[Any]:
    label_freqs = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def UpperCamelCase__ ( batch ):
    '''simple docstring'''
    lens = [len(row['sentence'] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row['image'] for row in batch] )
    tgt_tensor = torch.stack([row['label'] for row in batch] )
    img_start_token = torch.stack([row['image_start_token'] for row in batch] )
    img_end_token = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
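# Typical wiring for the collate function above (sketch; `dataset` stands for the
# JSON-lines dataset defined earlier, and DataLoader comes from torch.utils.data):
#   loader = DataLoader(dataset, batch_size=8, collate_fn=UpperCamelCase__)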
def UpperCamelCase__ ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def UpperCamelCase__ ( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , num_of_nodes ) -> None:
    self.m_num_of_nodes = num_of_nodes
    self.m_edges = []
    self.m_component = {}
def add_edge( self , u_node , v_node , weight ) -> None:
    self.m_edges.append([u_node, v_node, weight] )
def find_component( self , u_node ) -> int:
    if self.m_component[u_node] == u_node:
        return u_node
    return self.find_component(self.m_component[u_node] )
def set_component( self , u_node ) -> None:
    if self.m_component[u_node] != u_node:
        for k in self.m_component:
            self.m_component[k] = self.find_component(k )
def union( self , component_size , u_node , v_node ) -> None:
    if component_size[u_node] <= component_size[v_node]:
        self.m_component[u_node] = v_node
        component_size[v_node] += component_size[u_node]
        self.set_component(u_node )
    elif component_size[u_node] >= component_size[v_node]:
        self.m_component[v_node] = self.find_component(u_node )
        component_size[u_node] += component_size[v_node]
        self.set_component(v_node )
def boruvka( self ) -> None:
    component_size = []
    mst_weight = 0
    minimum_weight_edge = [-1] * self.m_num_of_nodes
    # A list of components (initialized to all of the nodes)
    for node in range(self.m_num_of_nodes ):
        self.m_component.update({node: node} )
        component_size.append(1 )
    num_of_components = self.m_num_of_nodes
    while num_of_components > 1:
        for edge in self.m_edges:
            u , v , w = edge
            u_component = self.m_component[u]
            v_component = self.m_component[v]
            if u_component != v_component:
                for component in (u_component, v_component):
                    if (
                        minimum_weight_edge[component] == -1
                        or minimum_weight_edge[component][2] > w
                    ):
                        minimum_weight_edge[component] = [u, v, w]
        for edge in minimum_weight_edge:
            if isinstance(edge , list ):
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    mst_weight += w
                    self.union(component_size , u_component , v_component )
                    print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                    num_of_components -= 1
        minimum_weight_edge = [-1] * self.m_num_of_nodes
    print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
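# Minimal usage sketch for the Boruvka MST routine above (the class name is
# obfuscated in this snippet, so `Graph` below is illustrative):
#   g = Graph(3)
#   g.add_edge(0, 1, 5); g.add_edge(1, 2, 3); g.add_edge(0, 2, 4)
#   g.boruvka()  # picks edges (1, 2) and (0, 2); total MST weight is 7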
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester :
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Any:
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def prepare_config_and_inputs( self ) -> List[Any]:
    input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        choice_labels = ids_tensor([self.batch_size] , self.num_choices )
    config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self ) -> Optional[int]:
_lowerCAmelCase =EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__A , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
    model = EsmForProteinFolding(config=config ).float()
    model.to(torch_device )
    model.eval()
    result = model(input_ids , attention_mask=input_mask )
    result = model(input_ids )
    result = model(input_ids )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def prepare_config_and_inputs_for_common( self ) -> List[str]:
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
test_mismatched_shapes = False
all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
all_generative_model_classes = ()
pipeline_model_mapping = {} if is_torch_available() else {}
test_sequence_classification_problem_types = False
def setUp( self ) -> Any:
    self.model_tester = EsmFoldModelTester(self )
    self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def test_config( self ) -> Tuple:
    self.config_tester.run_common_tests()
def test_model( self ) -> Any:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip('Does not support attention outputs' )
def UpperCamelCase__ ( self ) -> Tuple:
pass
@unittest.skip
def UpperCamelCase__ ( self ) -> str:
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def UpperCamelCase__ ( self ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('ESMFold only has one output format.' )
def UpperCamelCase__ ( self ) -> List[str]:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def UpperCamelCase__ ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def UpperCamelCase__ ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def UpperCamelCase__ ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def UpperCamelCase__ ( self ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ) -> int:
pass
@require_torch
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> List[str]:
    model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
    model.eval()
    input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
    position_outputs = model(input_ids )['positions']
    expected_slice = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.float32 )
    self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1E-4 ) )
'''simple docstring'''
from PIL import Image
def change_brightness( img , level ):
    '''simple docstring'''
    def brightness(c ) -> float:
        return 1_2_8 + level + (c - 1_2_8)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
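# Worked example: with level=100, pixel value 0 maps to 100 and 155 maps to 255;
# values that fall outside [0, 255] are clipped by PIL when the lookup table is applied.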
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
    brigt_img = change_brightness(img, 100)
    brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
FlaxDDIMScheduler = 1
FlaxDDPMScheduler = 2
FlaxPNDMScheduler = 3
FlaxLMSDiscreteScheduler = 4
FlaxDPMSolverMultistepScheduler = 5
@dataclass
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
prev_sample : jnp.ndarray
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
config_name = SCHEDULER_CONFIG_NAME
ignore_for_config = ['dtype']
_compatibles = []
has_compatibles = True
@classmethod
def UpperCamelCase__ ( cls , pretrained_model_name_or_path = None , subfolder = None , return_unused_kwargs=False , **kwargs , ) -> Optional[int]:
    config , kwargs = cls.load_config(
        pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
    scheduler , unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
    if hasattr(scheduler , 'create_state' ) and getattr(scheduler , 'has_state' , False ):
        state = scheduler.create_state()
    if return_unused_kwargs:
        return scheduler, state, unused_kwargs
    return scheduler, state
def UpperCamelCase__ ( self , save_directory , push_to_hub = False , **kwargs ) -> Union[str, Any]:
    self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
@property
def UpperCamelCase__ ( self ) -> Any:
return self._get_compatibles()
@classmethod
def UpperCamelCase__ ( cls ) -> Tuple:
    compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
    diffusers_library = importlib.import_module(__name__.split('.' )[0] )
    compatible_classes = [
        getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
    ]
return compatible_classes
def broadcast_to_shape_from_left( x , shape ):
    '''simple docstring'''
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
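# Example: broadcast_to_shape_from_left(jnp.ones((2,)), (2, 3, 4)) first reshapes
# the input to (2, 1, 1) and then broadcasts it to (2, 3, 4).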
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ):
    '''simple docstring'''
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
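# This is the squared-cosine ("cosine") noise schedule from Nichol & Dhariwal (2021):
# alpha_bar(t) decays smoothly from 1 toward 0, and each beta is capped at max_beta
# so the final diffusion steps do not collapse.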
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
alphas : jnp.ndarray
betas : jnp.ndarray
alphas_cumprod : jnp.ndarray
@classmethod
def UpperCamelCase__ ( cls , scheduler ) -> Dict:
    config = scheduler.config
    if config.trained_betas is not None:
        betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
    elif config.beta_schedule == "linear":
        betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
    elif config.beta_schedule == "scaled_linear":
        # this schedule is very specific to the latent diffusion model.
        betas = (
            jnp.linspace(
                config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
            ** 2
        )
    elif config.beta_schedule == "squaredcos_cap_v2":
        # Glide cosine schedule
        betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
    else:
        raise NotImplementedError(
            F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
    alphas = 1.0 - betas
    alphas_cumprod = jnp.cumprod(alphas , axis=0 )
    return cls(
        alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod( state , original_samples , noise , timesteps ):
    '''simple docstring'''
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCamelCase__ ( state , original_samples , noise , timesteps ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
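# v-prediction target (Salimans & Ho, 2022): the helper below computes
#   v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample
# using the same broadcasting helper as the forward-noising function above.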
def UpperCamelCase__ ( state , sample , noise , timesteps ):
    '''simple docstring'''
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def setUpClass( cls ) -> Optional[Any]:
    cls._token = TOKEN
    HfFolder.save_token(TOKEN )
@classmethod
def tearDownClass( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
    config = BertConfig(
        vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
    config.push_to_hub('test-config' , use_auth_token=self._token )
    new_config = BertConfig.from_pretrained(F'''{USER}/test-config''' )
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v , getattr(new_config , k ) )
    # Reset repo
    delete_repo(token=self._token , repo_id='test-config' )
    # Push to hub via save_pretrained
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir , repo_id='test-config' , push_to_hub=True , use_auth_token=self._token )
    new_config = BertConfig.from_pretrained(F'''{USER}/test-config''' )
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v , getattr(new_config , k ) )
def UpperCamelCase__ ( self ) -> Dict:
    config = BertConfig(
        vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
    config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
    new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v , getattr(new_config , k ) )
    # Reset repo
    delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
    # Push to hub via save_pretrained
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(
            tmp_dir , repo_id='valid_org/test-config-org' , push_to_hub=True , use_auth_token=self._token )
    new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
    for k, v in config.to_dict().items():
        if k != "transformers_version":
            self.assertEqual(v , getattr(new_config , k ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
    config = CustomConfig(attribute=42 )
    config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
    # This has added the proper auto_map field to the config
    self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
    new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=True )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
    c = GPT2Config()
    # attempt to modify each of int/float/bool/str config records and verify they were updated
    n_embd = c.n_embd + 1 # int
    resid_pdrop = c.resid_pdrop + 1.0 # float
    scale_attn_weights = not c.scale_attn_weights # bool
    summary_type = c.summary_type + 'foo' # str
    c.update_from_string(
        F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
    self.assertEqual(n_embd , c.n_embd , 'mismatch for key: n_embd' )
    self.assertEqual(resid_pdrop , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
    self.assertEqual(scale_attn_weights , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
    self.assertEqual(summary_type , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
    base_config = PretrainedConfig()
    missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
    # If this part of the test fails, you have arguments to add in config_common_kwargs above.
    self.assertListEqual(
        missing_keys , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
    keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
    if len(keys_with_defaults ) > 0:
        raise ValueError(
            'The following keys are set with the default values in'
            ' `test_configuration_common.config_common_kwargs` pick another value for them:'
            F''' {', '.join(keys_with_defaults )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
    config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
    self.assertIsNotNone(config )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
    response_mock = mock.Mock()
    response_mock.status_code = 500
    response_mock.headers = {}
    response_mock.raise_for_status.side_effect = HTTPError
    response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
    with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
    configuration = AutoConfig.from_pretrained('bert-base-cased' )
    configuration.configuration_files = ['config.4.0.0.json']
    with tempfile.TemporaryDirectory() as tmp_dir:
        configuration.save_pretrained(tmp_dir )
        configuration.hidden_size = 2
        json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , 'config.4.0.0.json' ) , 'w' ) )
        # This should pick the new configuration file as the version of Transformers is > 4.0.0
        new_configuration = AutoConfig.from_pretrained(tmp_dir )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # Will need to be adjusted if we reach v42 and this test is still here.
        # Should pick the old configuration file as the version of Transformers is < 4.42.0
        configuration.configuration_files = ['config.42.0.0.json']
        configuration.hidden_size = 768
        configuration.save_pretrained(tmp_dir )
        shutil.move(os.path.join(tmp_dir , 'config.4.0.0.json' ) , os.path.join(tmp_dir , 'config.42.0.0.json' ) )
        new_configuration = AutoConfig.from_pretrained(tmp_dir )
        self.assertEqual(new_configuration.hidden_size , 768 )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
    repo = 'hf-internal-testing/test-two-configs'
    import transformers as new_transformers
    new_transformers.configuration_utils.__version__ = 'v4.0.0'
    new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
        repo , return_unused_kwargs=True )
    self.assertEqual(new_configuration.hidden_size , 2 )
    # This checks `_configuration_file` is not kept in the kwargs by mistake.
    self.assertDictEqual(kwargs , {} )
    # Testing an older version by monkey-patching the version in the module it's used.
    import transformers as old_transformers
    old_transformers.configuration_utils.__version__ = 'v3.0.0'
    old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
    self.assertEqual(old_configuration.hidden_size , 768 )
'''simple docstring'''
class EditDistance :
"""simple docstring"""
def __init__( self ) -> None:
    self.worda = ''
    self.wordb = ''
    self.dp = []
def __min_dist_top_down_dp( self , m , n ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
        if self.worda[m] == self.wordb[n]:
            self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
        else:
            insert = self.__min_dist_top_down_dp(m , n - 1 )
            delete = self.__min_dist_top_down_dp(m - 1 , n )
            replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            self.dp[m][n] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
def min_dist_top_down( self , worda , wordb ) -> int:
    self.worda = worda
    self.wordb = wordb
    self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
    return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )
def min_dist_bottom_up( self , worda , wordb ) -> int:
    self.worda = worda
    self.wordb = wordb
    m = len(worda )
    n = len(wordb )
    self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        for j in range(n + 1 ):
            if i == 0: # first string is empty
                self.dp[i][j] = j
            elif j == 0: # second string is empty
                self.dp[i][j] = i
            elif worda[i - 1] == wordb[j - 1]: # last characters are equal
                self.dp[i][j] = self.dp[i - 1][j - 1]
            else:
                insert = self.dp[i][j - 1]
                delete = self.dp[i - 1][j]
                replace = self.dp[i - 1][j - 1]
                self.dp[i][j] = 1 + min(insert , delete , replace )
    return self.dp[m][n]
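# Worked example: the minimum edit distance between 'intention' and 'execution'
# is 5 (substitute i->e, n->x, t->c, insert u, substitute n->t).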
if __name__ == "__main__":
solver = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
Sa = input('''Enter the first string: ''').strip()
Sb = input('''Enter the second string: ''').strip()
print()
print(F'The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}')
print(F'The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort( list_of_ints ):
    '''simple docstring'''
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
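# Example: radix_sort([170, 45, 75, 90, 2, 24, 802, 66]) -> [2, 24, 45, 66, 75, 90, 170, 802]
# (non-negative integers only; each pass buckets the list by one base-10 digit).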
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
'''simple docstring'''
lowercase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
'''simple docstring'''
from __future__ import annotations
def peak( lst ):
    '''simple docstring'''
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
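# Example: peak([1, 3, 5, 4, 2]) returns 5; the list must rise and then fall
# (strictly unimodal) for this O(log n) slope-based recursion to be valid.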
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'AutoImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__( self , image_processor , tokenizer ) -> Optional[int]:
    super().__init__(image_processor , tokenizer )
    self.current_processor = self.image_processor
def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> Optional[Any]:
    if text is None and images is None:
        raise ValueError('You have to specify either text or images. Both cannot be none.' )
    if text is not None:
        encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
    if images is not None:
        image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
    if text is not None and images is not None:
        encoding['pixel_values'] = image_features.pixel_values
        return encoding
    elif text is not None:
        return encoding
    else:
        return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def UpperCamelCase__ ( self , *__A , **__A ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> List[str]:
return ["input_ids", "attention_mask", "pixel_values"]
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , token_ids_a , token_ids_b=None ) -> int:
    output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
    if token_ids_b:
        output += token_ids_b + [self.sep_token_id]
    return output
def UpperCamelCase__ ( self , token_ids_a , token_ids_b = None ) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def UpperCamelCase__ ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
    files = self._tokenizer.model.save(save_directory , name=filename_prefix )
    return tuple(files )
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
"""simple docstring"""
tokenizer_class = LxmertTokenizer
rust_tokenizer_class = LxmertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
def setUp( self ) -> str:
    super().setUp()
    vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
    self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
    with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
        vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def get_input_output_texts( self , tokenizer ) -> int:
    input_text = 'UNwant\u00E9d,running'
    output_text = 'unwanted, running'
return input_text, output_text
def UpperCamelCase__ ( self ) -> List[str]:
    tokenizer = self.tokenizer_class(self.vocab_file )
    tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
    self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
    self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase__ ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
    tokenizer = self.get_tokenizer()
    rust_tokenizer = self.get_rust_tokenizer()
    sequence = 'I was born in 92000, and this is falsé.'
    tokens = tokenizer.tokenize(sequence )
    rust_tokens = rust_tokenizer.tokenize(sequence )
    self.assertListEqual(tokens , rust_tokens )
    ids = tokenizer.encode(sequence , add_special_tokens=False )
    rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
    self.assertListEqual(ids , rust_ids )
    rust_tokenizer = self.get_rust_tokenizer()
    ids = tokenizer.encode(sequence )
    rust_ids = rust_tokenizer.encode(sequence )
    self.assertListEqual(ids , rust_ids )
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'CLIPImageProcessor'
tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> str:
    feature_extractor = None
    if "feature_extractor" in kwargs:
        warnings.warn(
            'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
            ' instead.' , FutureWarning , )
        feature_extractor = kwargs.pop('feature_extractor' )
    image_processor = image_processor if image_processor is not None else feature_extractor
    if image_processor is None:
        raise ValueError('You need to specify an `image_processor`.' )
    if tokenizer is None:
        raise ValueError('You need to specify a `tokenizer`.' )
    super().__init__(image_processor , tokenizer )
def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> Optional[int]:
    if text is None and images is None:
        raise ValueError('You have to specify either text or images. Both cannot be none.' )
    if text is not None:
        encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
    if images is not None:
        image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
    if text is not None and images is not None:
        encoding['pixel_values'] = image_features.pixel_values
        return encoding
    elif text is not None:
        return encoding
    else:
        return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
        '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
        '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
return self.image_processor
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision ):
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 1_4 )
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
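# Each term of the Chudnovsky series contributes roughly 14 new digits of pi,
# which is why the loop above runs ceil(precision / 14) iterations.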
if __name__ == "__main__":
    n = 50
print(F'The first {n} digits of pi is: {pi(n)}')
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def encoder_decoder_mask( self , query_input , key_input ) -> Any:
    mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
    return mask.unsqueeze(-3 )
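# Shape sketch: a query mask (B, T) and a key mask (B, S) combine into an outer
# product of shape (B, T, S); unsqueeze(-3) then yields (B, 1, T, S) so the mask
# broadcasts across attention heads.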
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class DecoderLayer( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class TaLayerCrossAttention( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class TaLayerFFCond( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class TaDenseGatedActDense( nn.Module):
"""simple docstring"""
def __init__( self , d_model , d_ff , dropout_rate ) -> Union[str, Any]:
    super().__init__()
    self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
    self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
    self.wo = nn.Linear(d_ff , d_model , bias=False )
    self.dropout = nn.Dropout(dropout_rate )
    self.act = NewGELUActivation()
def UpperCamelCase__ ( self , hidden_states ) -> List[Any]:
    hidden_gelu = self.act(self.wi_0(hidden_states ) )
    hidden_linear = self.wi_1(hidden_states )
    hidden_states = hidden_gelu * hidden_linear
    hidden_states = self.dropout(hidden_states )
    hidden_states = self.wo(hidden_states )
    return hidden_states
class TaLayerNorm( nn.Module):
"""simple docstring"""
def __init__( self , hidden_size , eps=1E-6 ) -> int:
    super().__init__()
    self.weight = nn.Parameter(torch.ones(hidden_size ) )
    self.variance_epsilon = eps
def UpperCamelCase__ ( self , hidden_states ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
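    # i.e. y = weight * x / sqrt(mean(x**2, axis=-1) + eps), with the mean taken in fp32.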
    variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
    hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
    # convert into half-precision if necessary
    if self.weight.dtype in [torch.float16, torch.bfloat16]:
        hidden_states = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class NewGELUActivation( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , input ) -> torch.Tensor:
    return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
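# Tanh approximation of GELU (Hendrycks & Gimpel, 2016); it tracks the exact
# x * Phi(x) definition closely while avoiding an erf call.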
class TaFiLMLayer( nn.Module):
"""simple docstring"""
def __init__( self , in_features , out_features ) -> Optional[Any]:
    super().__init__()
    self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
def UpperCamelCase__ ( self , x , conditioning_emb ) -> Optional[Any]:
    emb = self.scale_bias(conditioning_emb )
    scale , shift = torch.chunk(emb , 2 , -1 )
    x = x * (1 + scale) + shift
    return x
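# FiLM conditioning (Perez et al., 2018): the conditioning embedding is projected
# to per-channel (scale, shift) pairs and applied as x * (1 + scale) + shift.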
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( args ):
    '''simple docstring'''
    return TrainCommand(args )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( parser ) -> Tuple:
    train_parser = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
    train_parser.add_argument(
        '--train_data' , type=str , required=True , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
    train_parser.add_argument(
        '--column_label' , type=int , default=0 , help='Column of the dataset csv file with example labels.' )
    train_parser.add_argument(
        '--column_text' , type=int , default=1 , help='Column of the dataset csv file with example texts.' )
    train_parser.add_argument(
        '--column_id' , type=int , default=2 , help='Column of the dataset csv file with example ids.' )
    train_parser.add_argument(
        '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
    train_parser.add_argument('--validation_data' , type=str , default='' , help='path to validation dataset.' )
    train_parser.add_argument(
        '--validation_split' , type=float , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
    train_parser.add_argument('--output' , type=str , default='./' , help='path to save the trained model.' )
    train_parser.add_argument(
        '--task' , type=str , default='text_classification' , help='Task to train the model on.' )
    train_parser.add_argument(
        '--model' , type=str , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
    train_parser.add_argument('--train_batch_size' , type=int , default=32 , help='Batch size for training.' )
    train_parser.add_argument('--valid_batch_size' , type=int , default=64 , help='Batch size for validation.' )
    train_parser.add_argument('--learning_rate' , type=float , default=3E-5 , help='Learning rate.' )
    train_parser.add_argument('--adam_epsilon' , type=float , default=1E-08 , help='Epsilon for Adam optimizer.' )
    train_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , args ) -> List[str]:
    self.logger = logging.get_logger('transformers-cli/training' )
    self.framework = 'tf' if is_tf_available() else 'torch'
    os.makedirs(args.output , exist_ok=True )
    self.output = args.output
    self.column_label = args.column_label
    self.column_text = args.column_text
    self.column_id = args.column_id
    self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
    if args.task == "text_classification":
        self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
    elif args.task == "token_classification":
        raise NotImplementedError
    elif args.task == "question_answering":
        raise NotImplementedError
    self.logger.info(F'''Loading dataset from {args.train_data}''' )
    self.train_dataset = Processor.create_from_csv(
        args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
    self.valid_dataset = None
    if args.validation_data:
        self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
        self.valid_dataset = Processor.create_from_csv(
            args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
    self.validation_split = args.validation_split
    self.train_batch_size = args.train_batch_size
    self.valid_batch_size = args.valid_batch_size
    self.learning_rate = args.learning_rate
    self.adam_epsilon = args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
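# Illustrative CLI invocation for the command defined above (a hedged sketch:
# paths are placeholders, and the flags mirror the argparse options added by the
# static registration method above):
#   transformers-cli train --train_data ./train.csv --validation_split 0.1 \
#       --task text_classification --model bert-base-uncased --output ./trained_model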
| 58
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 1
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__A , 'num_attention_heads' ) )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=64 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 6, 8] , __A=[2, 3, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.02 , __A=True , __A=True , __A=2 , ) -> Optional[Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =kernel_size
_lowerCAmelCase =stride
_lowerCAmelCase =padding
_lowerCAmelCase =hidden_sizes
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =depths
_lowerCAmelCase =key_dim
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =patch_size
_lowerCAmelCase =attention_ratio
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =initializer_range
_lowerCAmelCase =[
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =num_labels
_lowerCAmelCase =initializer_range
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> List[str]:
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =LevitModel(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A )
_lowerCAmelCase =(self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase =image_size[0], image_size[1]
for _ in range(4 ):
_lowerCAmelCase =floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
_lowerCAmelCase =floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def UpperCamelCase__ ( self , __A , __A , __A ) -> int:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =LevitForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase : Optional[Any] = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase : Union[str, Any] = False
lowercase : List[Any] = False
lowercase : str = False
lowercase : int = False
lowercase : Dict = False
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =LevitModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ) -> Optional[int]:
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def UpperCamelCase__ ( self ) -> List[str]:
pass
@unittest.skip(reason='Levit does not output attentions' )
def UpperCamelCase__ ( self ) -> Any:
pass
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__A )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , __A )
def UpperCamelCase__ ( self ) -> str:
def check_hidden_states_output(__A , __A , __A ):
_lowerCAmelCase =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCAmelCase =model(**self._prepare_for_class(__A , __A ) )
_lowerCAmelCase =outputs.hidden_states
_lowerCAmelCase =len(self.model_tester.depths ) + 1
self.assertEqual(len(__A ) , __A )
_lowerCAmelCase =(self.model_tester.image_size, self.model_tester.image_size)
_lowerCAmelCase , _lowerCAmelCase =image_size[0], image_size[1]
for _ in range(4 ):
_lowerCAmelCase =floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
_lowerCAmelCase =floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(__A , __A , __A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ) -> str:
pass
def UpperCamelCase__ ( self , __A , __A , __A=False ) -> Any:
_lowerCAmelCase =super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def UpperCamelCase__ ( self ) -> str:
if not self.model_tester.is_training:
return
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
_lowerCAmelCase =model_class(__A )
model.to(__A )
model.train()
_lowerCAmelCase =self._prepare_for_class(__A , __A , return_labels=__A )
_lowerCAmelCase =model(**__A ).loss
loss.backward()
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCAmelCase =False
_lowerCAmelCase =True
for model_class in self.all_model_classes:
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
_lowerCAmelCase =model_class(__A )
model.gradient_checkpointing_enable()
model.to(__A )
model.train()
_lowerCAmelCase =self._prepare_for_class(__A , __A , return_labels=__A )
_lowerCAmelCase =model(**__A ).loss
loss.backward()
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =[
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
_lowerCAmelCase =problem_type['title']
_lowerCAmelCase =problem_type['num_labels']
_lowerCAmelCase =model_class(__A )
model.to(__A )
model.train()
_lowerCAmelCase =self._prepare_for_class(__A , __A , return_labels=__A )
if problem_type["num_labels"] > 1:
_lowerCAmelCase =inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
_lowerCAmelCase =inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong with the regression problem type.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__A ) as warning_list:
_lowerCAmelCase =model(**__A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ) -> List[Any]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =LevitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ) -> Any:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__A , return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**__A )
# verify the logits
_lowerCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
_lowerCAmelCase =torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
| 58
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # fed into nn.Sequential with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
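# Example invocation (illustrative only; the script name and paths are placeholders):
#   python convert_checkpoint.py --tf_model_dir /path/to/tf_checkpoint --output /path/to/model.pt
# Note that the converter appends a `.pt` suffix to the output path automatically
# if it is missing (see the check near the top of the function above).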
| 58
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = 'deberta-v2'
def __init__( self , __A=12_8100 , __A=1536 , __A=24 , __A=24 , __A=6144 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0 , __A=0.02 , __A=1E-7 , __A=False , __A=-1 , __A=0 , __A=True , __A=None , __A=0 , __A="gelu" , **__A , ) -> Dict:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =relative_attention
_lowerCAmelCase =max_relative_positions
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =position_biased_input
# Backwards compatibility
if type(__A ) == str:
_lowerCAmelCase =[x.strip() for x in pos_att_type.lower().split('|' )]
_lowerCAmelCase =pos_att_type
_lowerCAmelCase =vocab_size
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =kwargs.get('pooler_hidden_size' , __A )
_lowerCAmelCase =pooler_dropout
_lowerCAmelCase =pooler_hidden_act
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def UpperCamelCase__ ( self ) -> int:
return 12
def UpperCamelCase__ ( self , __A , __A = -1 , __A = -1 , __A = -1 , __A = False , __A = None , __A = 3 , __A = 40 , __A = 40 , __A = None , ) -> Mapping[str, Any]:
_lowerCAmelCase =super().generate_dummy_inputs(preprocessor=__A , framework=__A )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
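# Usage sketch (assumes the `transformers.onnx` export CLI; illustrative only):
#   python -m transformers.onnx --model=microsoft/deberta-v2-xlarge onnx/
# The OnnxConfig above drops `token_type_ids` from the dummy inputs whenever the
# configuration sets `type_vocab_size` to 0, matching the dynamic axes it declares.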
| 58
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
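# Worked example (a sketch, not part of the original file): for power=15,
# 2**15 = 32768 and the digit sum is 3 + 2 + 7 + 6 + 8 = 26, so solution(15) == 26.
# The default power=1000 reproduces Project Euler problem 16.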
| 58
| 1
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
if n == 1 or not isinstance(a__ , a__ ):
return 0
elif n == 2:
return 1
else:
_lowerCAmelCase =[0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =0
_lowerCAmelCase =2
while digits < n:
index += 1
_lowerCAmelCase =len(str(fibonacci(a__ ) ) )
return index
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
return fibonacci_digits_index(a__ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
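# Worked example (sketch): the first Fibonacci number with 3 digits is
# F(12) = 144, so solution(3) == 12. The default n=1000 reproduces
# Project Euler problem 25.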
| 58
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
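# Usage sketch (using deobfuscated names implied by the logic above; illustrative only):
#   graph = {0: [1], 1: [2], 2: [0]}        # the back edge 2 -> 0 forms a cycle
#   check_cycle(graph)                      # -> True
#   check_cycle({0: [1], 1: [2], 2: []})    # -> False (a DAG has no back edge)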
| 58
| 1
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowercase_ = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
lowercase_ = re.compile(r'''([a-z\d])([A-Z])''')
lowercase_ = re.compile(r'''(?<!_)_(?!_)''')
lowercase_ = re.compile(r'''(_{2,})''')
lowercase_ = r'''^\w+(\.\w+)*$'''
lowercase_ = r'''<>:/\|?*'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =_uppercase_uppercase_re.sub(r'\1_\2' , a__ )
_lowerCAmelCase =_lowercase_uppercase_re.sub(r'\1_\2' , a__ )
return name.lower()
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =_single_underscore_re.split(a__ )
_lowerCAmelCase =[_multiple_underscores_re.split(a__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(a__ ) if n != '' )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
if os.path.basename(a__ ) != name:
raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(a__ )
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
if os.path.basename(a__ ) != name:
raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , a__ ):
raise ValueError(F'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return F'''{filename_prefix_for_name(a__ )}-{split}'''
def UpperCamelCase__ ( a__ , a__ , a__ , a__=None ):
'''simple docstring'''
_lowerCAmelCase =filename_prefix_for_split(a__ , a__ )
if filetype_suffix:
prefix += F'''.{filetype_suffix}'''
_lowerCAmelCase =os.path.join(a__ , a__ )
return F'''{filepath}*'''
def UpperCamelCase__ ( a__ , a__ , a__ , a__=None , a__=None ):
'''simple docstring'''
_lowerCAmelCase =filename_prefix_for_split(a__ , a__ )
_lowerCAmelCase =os.path.join(a__ , a__ )
if shard_lengths:
_lowerCAmelCase =len(a__ )
_lowerCAmelCase =[F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(a__ )]
if filetype_suffix:
_lowerCAmelCase =[filename + F'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
_lowerCAmelCase =prefix
if filetype_suffix:
filename += F'''.{filetype_suffix}'''
return [filename]
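# Worked example (sketch; deobfuscated names for the helpers above): for
# name="squad", split="train", path="/data", filetype_suffix="arrow",
# the pattern helper yields "/data/squad-train.arrow*", and with a
# shard_lengths list of length 2 the filenames helper yields
# ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"].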
| 58
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. Initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
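# Minimal construction sketch (assumes the public transformers classes these
# obfuscated names map to, e.g. Blip2Config; illustrative only):
#   config = Blip2Config()          # OPT text backbone by default, 32 query tokens
#   config_dict = config.to_dict()  # nested vision/qformer/text dicts, as above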
| 58
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : str = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase : Union[str, Any] = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase : List[str] = False
lowercase : str = False
def UpperCamelCase__ ( self , __A , __A , __A=False ) -> Optional[int]:
_lowerCAmelCase =super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
_lowerCAmelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=32 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=3 , __A=4 , __A=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =num_labels
_lowerCAmelCase =num_choices
_lowerCAmelCase =scope
_lowerCAmelCase =embedding_size
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =None
if self.use_token_type_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase =MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =TFMobileBertModel(config=__A )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase =model(__A )
_lowerCAmelCase =[input_ids, input_mask]
_lowerCAmelCase =model(__A )
_lowerCAmelCase =model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> int:
_lowerCAmelCase =TFMobileBertForMaskedLM(config=__A )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> int:
_lowerCAmelCase =TFMobileBertForNextSentencePrediction(config=__A )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[int]:
_lowerCAmelCase =TFMobileBertForPreTraining(config=__A )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase =model(__A )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =TFMobileBertForSequenceClassification(config=__A )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
_lowerCAmelCase =self.num_choices
_lowerCAmelCase =TFMobileBertForMultipleChoice(config=__A )
_lowerCAmelCase =tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase =tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase =tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowerCAmelCase =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =TFMobileBertForTokenClassification(config=__A )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase =model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[int]:
_lowerCAmelCase =TFMobileBertForQuestionAnswering(config=__A )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase =model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__A )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__A )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__A )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__A )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__A )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__A )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__A )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__A )
@slow
def UpperCamelCase__ ( self ) -> Tuple:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowerCAmelCase =TFMobileBertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
_lowerCAmelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase =model(__A )[0]
_lowerCAmelCase =[1, 6, 3_0522]
self.assertEqual(output.shape , __A )
_lowerCAmelCase =tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1E-4 )
| 58
|
'''simple docstring'''
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
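# Worked example (sketch): encrypt("SOS") -> "... --- ..." and
# decrypt("... --- ...") -> "SOS", per the ITU-R table above.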
| 58
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowercase_ = logging.get_logger('''transformers.models.encodec''')
lowercase_ = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowercase_ = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowercase_ = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowercase_ = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowercase_ = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowercase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowercase_ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowercase_ = []
lowercase_ = []
def UpperCamelCase__ ( a__ , a__ , a__ , a__ , a__ ):
'''simple docstring'''
for attribute in key.split('.' ):
_lowerCAmelCase =getattr(a__ , a__ )
if weight_type is not None:
_lowerCAmelCase =getattr(a__ , a__ ).shape
else:
_lowerCAmelCase =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_lowerCAmelCase =value
elif weight_type == "weight_g":
_lowerCAmelCase =value
elif weight_type == "weight_v":
_lowerCAmelCase =value
elif weight_type == "bias":
_lowerCAmelCase =value
elif weight_type == "running_mean":
_lowerCAmelCase =value
elif weight_type == "running_var":
_lowerCAmelCase =value
elif weight_type == "num_batches_tracked":
_lowerCAmelCase =value
elif weight_type == "weight_ih_l0":
_lowerCAmelCase =value
elif weight_type == "weight_hh_l0":
_lowerCAmelCase =value
elif weight_type == "bias_ih_l0":
_lowerCAmelCase =value
elif weight_type == "bias_hh_l0":
_lowerCAmelCase =value
elif weight_type == "weight_ih_l1":
_lowerCAmelCase =value
elif weight_type == "weight_hh_l1":
_lowerCAmelCase =value
elif weight_type == "bias_ih_l1":
_lowerCAmelCase =value
elif weight_type == "bias_hh_l1":
_lowerCAmelCase =value
else:
_lowerCAmelCase =value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
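# Editorial sketch (not part of the original script): the dotted ``key`` is
# resolved by walking attributes one segment at a time, so a hypothetical key
# "encoder.layers.0.conv" with weight_type "bias" amounts to:
#   pointer = hf_model
#   for attr in "encoder.layers.0.conv".split("."):
#       pointer = getattr(pointer, attr)   # ModuleList indices resolve via getattr too
#   pointer.bias.data = value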
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_lowerCAmelCase , _lowerCAmelCase =key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
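# Editorial examples of the three pattern forms accepted above
# (the layer names are hypothetical):
#   should_ignore("encoder.model.0.bias", ["encoder.model.*"])  -> True   # trailing wildcard: prefix match
#   should_ignore("encoder.0.lstm.bias", ["encoder.*.lstm"])    -> True   # infix wildcard: prefix and suffix both present
#   should_ignore("decoder.final_proj.weight", ["final_proj"])  -> True   # plain substring match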
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =[]
    if model_name in ("encodec_24khz", "encodec_32khz"):
_lowerCAmelCase =MAPPING_24K
elif model_name == "encodec_48khz":
_lowerCAmelCase =MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(a__ , a__ ):
logger.info(F'''{name} was ignored''' )
continue
_lowerCAmelCase =False
for key, mapped_key in MAPPING.items():
if "*" in key:
_lowerCAmelCase , _lowerCAmelCase =key.split('.*.' )
if prefix in name and suffix in name:
_lowerCAmelCase =suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
_lowerCAmelCase =True
if "*" in mapped_key:
_lowerCAmelCase =name.split(a__ )[0].split('.' )[-2]
_lowerCAmelCase =mapped_key.replace('*' , a__ )
if "weight_g" in name:
_lowerCAmelCase ='weight_g'
elif "weight_v" in name:
_lowerCAmelCase ='weight_v'
elif "weight_ih_l0" in name:
_lowerCAmelCase ='weight_ih_l0'
elif "weight_hh_l0" in name:
_lowerCAmelCase ='weight_hh_l0'
elif "bias_ih_l0" in name:
_lowerCAmelCase ='bias_ih_l0'
elif "bias_hh_l0" in name:
_lowerCAmelCase ='bias_hh_l0'
elif "weight_ih_l1" in name:
_lowerCAmelCase ='weight_ih_l1'
elif "weight_hh_l1" in name:
_lowerCAmelCase ='weight_hh_l1'
elif "bias_ih_l1" in name:
_lowerCAmelCase ='bias_ih_l1'
elif "bias_hh_l1" in name:
_lowerCAmelCase ='bias_hh_l1'
elif "bias" in name:
_lowerCAmelCase ='bias'
elif "weight" in name:
_lowerCAmelCase ='weight'
elif "running_mean" in name:
_lowerCAmelCase ='running_mean'
elif "running_var" in name:
_lowerCAmelCase ='running_var'
elif "num_batches_tracked" in name:
_lowerCAmelCase ='num_batches_tracked'
else:
_lowerCAmelCase =None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def UpperCamelCase__ ( a__ , a__ , a__ , a__=None , a__=None , ):
'''simple docstring'''
if config_path is not None:
_lowerCAmelCase =EncodecConfig.from_pretrained(a__ )
else:
_lowerCAmelCase =EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_lowerCAmelCase =[8, 5, 4, 4]
_lowerCAmelCase =[2.2]
        _lowerCAmelCase =64
        _lowerCAmelCase =32000
        _lowerCAmelCase =2048
_lowerCAmelCase =False
_lowerCAmelCase =False
_lowerCAmelCase =False
elif model_name == "encodec_48khz":
_lowerCAmelCase =[8, 5, 4, 2]
_lowerCAmelCase =[3.0, 6.0, 12.0, 24.0]
        _lowerCAmelCase =48000
_lowerCAmelCase =2
_lowerCAmelCase =False
_lowerCAmelCase ='time_group_norm'
_lowerCAmelCase =True
_lowerCAmelCase =1.0
_lowerCAmelCase =0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_lowerCAmelCase =EncodecModel(a__ )
_lowerCAmelCase =EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(a__ )
_lowerCAmelCase =torch.load(a__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_lowerCAmelCase =original_checkpoint['best_state']
recursively_load_weights(a__ , a__ , a__ )
model.save_pretrained(a__ )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(a__ )
model.push_to_hub(a__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowercase_ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
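# Example invocation (editorial; the script name and local paths are placeholders):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec-24khz-hf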
| 58
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 1
|
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __lt__( self , __A ) -> Tuple:
return self[-1] < other[-1]
def __eq__( self , __A ) -> Any:
return self[-1] == other[-1]
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =[]
# sort into stacks
for element in collection:
_lowerCAmelCase =Stack([element] )
_lowerCAmelCase =bisect_left(a__ , a__ )
if i != len(a__ ):
stacks[i].append(a__ )
else:
stacks.append(a__ )
    # use a heap-based merge to combine the piles efficiently, writing the
    # result back into the input list so the sorted order is actually returned
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
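# Worked example (editorial): for [5, 1, 4, 2, 3] the piles evolve as
#   [5] -> [5,1] -> [5,1] [4] -> [5,1] [4,2] -> [5,1] [4,2] [3]
# (each new element lands on the leftmost pile whose top is >= it, otherwise
# it opens a new pile); reversing each pile gives the sorted runs
# [1,5], [2,4], [3], which the heap merge interleaves into [1, 2, 3, 4, 5].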
if __name__ == "__main__":
lowercase_ = input('''Enter numbers separated by a comma:\n''').strip()
lowercase_ = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 58
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
        # load the stage I and stage II IF pipelines in fp16
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 1
|
'''simple docstring'''
# fmt: off
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
| 58
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
| 58
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Dict = 'roformer'
def __init__( self , __A=5_0000 , __A=None , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=1536 , __A=2 , __A=0.02 , __A=1E-12 , __A=0 , __A=False , __A=True , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size if embedding_size is None else embedding_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =rotary_value
_lowerCAmelCase =use_cache
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 58
|
'''simple docstring'''
lowercase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 1
|
'''simple docstring'''
import math
import sys
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
if number != int(a__ ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
_lowerCAmelCase =[-1] * (number + 1)
_lowerCAmelCase =0
for i in range(1 , number + 1 ):
_lowerCAmelCase =sys.maxsize
_lowerCAmelCase =int(math.sqrt(a__ ) )
for j in range(1 , root + 1 ):
_lowerCAmelCase =1 + answers[i - (j**2)]
_lowerCAmelCase =min(a__ , a__ )
_lowerCAmelCase =answer
return answers[number]
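# Editorial note: this is the classic minimum-perfect-squares DP; by
# Lagrange's four-square theorem the answer never exceeds 4. Examples:
#   n = 12 -> 3   (4 + 4 + 4)
#   n = 13 -> 2   (4 + 9)
#   n = 16 -> 1   (16 is itself a square)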
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
        # an excessive generation time usually means the model hangs on generate (e.g. a bad config was saved)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # otherwise the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # a longer gen time suggests the model hangs on generate (maybe a bad config was saved)
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 1
|
'''simple docstring'''
from math import sqrt
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =0
for i in range(1 , int(sqrt(a__ ) + 1 ) ):
if n % i == 0 and i != sqrt(a__ ):
total += i + n // i
elif i == sqrt(a__ ):
total += i
return total - n
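# Editorial examples for the proper-divisor sum d(n) computed above:
#   d(220) = 284 and d(284) = 220, the smallest amicable pair, so both
#   members are counted by the solution below; perfect numbers (d(n) == n)
#   are excluded by its `!= i` check.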
def UpperCamelCase__ ( a__ = 10000 ):
'''simple docstring'''
_lowerCAmelCase =sum(
i
for i in range(1 , a__ )
if sum_of_divisors(sum_of_divisors(a__ ) ) == i and sum_of_divisors(a__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
            '--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this; defaults to 0, meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
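# Example invocation (editorial sketch; the script name is a placeholder and
# flags other than --task and --overwrite_cache are assumed to come from the
# shared lightning_base argument helpers):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./out --do_train --gpus 1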
if __name__ == "__main__":
main()
| 58
| 1
|
'''simple docstring'''
import argparse
import os
import re
lowercase_ = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowercase_ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase_ = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase_ = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase_ = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase_ = re.compile(r'''\[([^\]]+)\]''')
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =_re_indent.search(a__ )
return "" if search is None else search.groups()[0]
def UpperCamelCase__ ( a__ , a__="" , a__=None , a__=None ):
'''simple docstring'''
_lowerCAmelCase =0
_lowerCAmelCase =code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(a__ ):
index += 1
_lowerCAmelCase =['\n'.join(lines[:index] )]
else:
_lowerCAmelCase =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase =[lines[index]]
index += 1
while index < len(a__ ) and (end_prompt is None or not lines[index].startswith(a__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(a__ ) )
if index < len(a__ ) - 1:
_lowerCAmelCase =[lines[index + 1]]
index += 1
else:
_lowerCAmelCase =[]
else:
blocks.append('\n'.join(a__ ) )
_lowerCAmelCase =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a__ ) > 0:
blocks.append('\n'.join(a__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a__ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
def _inner(a__ ):
return key(a__ ).lower().replace('_' , '' )
return _inner
def UpperCamelCase__ ( a__ , a__=None ):
'''simple docstring'''
def noop(a__ ):
return x
if key is None:
_lowerCAmelCase =noop
# Constants are all uppercase, they go first.
_lowerCAmelCase =[obj for obj in objects if key(a__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase =[obj for obj in objects if key(a__ )[0].isupper() and not key(a__ ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase =[obj for obj in objects if not key(a__ )[0].isupper()]
_lowerCAmelCase =ignore_underscore(a__ )
return sorted(a__ , key=a__ ) + sorted(a__ , key=a__ ) + sorted(a__ , key=a__ )
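# Editorial example of the three-bucket ordering (constants, then classes,
# then functions, each bucket sorted case- and underscore-insensitively):
#   sort_objects(["tokenize", "BertModel", "LOGGER", "_helper"])
#     -> ["LOGGER", "BertModel", "_helper", "tokenize"]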
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
def _replace(a__ ):
_lowerCAmelCase =match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
_lowerCAmelCase =[part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase =keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(a__ )] ) + "]"
_lowerCAmelCase =import_statement.split('\n' )
if len(a__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase =2 if lines[1].strip() == '[' else 1
_lowerCAmelCase =[(i, _re_strip_line.search(a__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase =sort_objects(a__ , key=lambda a__ : x[1] )
_lowerCAmelCase =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(a__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase =_re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase =[part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase =keys[:-1]
_lowerCAmelCase =get_indent(lines[1] ) + ', '.join([F'''"{k}"''' for k in sort_objects(a__ )] )
return "\n".join(a__ )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase =_re_bracket_content.sub(_replace , a__ )
return import_statement
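# Editorial example for the single-line case handled by the final branch:
#   _import_structure["models"] = ["ZModel", "AModel", "make_model"]
# becomes
#   _import_structure["models"] = ["AModel", "ZModel", "make_model"]
# (classes sorted alphabetically, with the lone function kept last).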
def UpperCamelCase__ ( a__ , a__=True ):
'''simple docstring'''
with open(a__ , 'r' ) as f:
_lowerCAmelCase =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase =split_code_in_indented_blocks(
a__ , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(a__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase =main_blocks[block_idx]
_lowerCAmelCase =block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase =0
while line_idx < len(a__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase =len(a__ )
else:
line_idx += 1
if line_idx >= len(a__ ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase ='\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase =get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_lowerCAmelCase =split_code_in_indented_blocks(a__ , indent_level=a__ )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase =_re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase =[(pattern.search(a__ ).groups()[0] if pattern.search(a__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase =[(i, key) for i, key in enumerate(a__ ) if key is not None]
_lowerCAmelCase =[x[0] for x in sorted(a__ , key=lambda a__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase =0
_lowerCAmelCase =[]
for i in range(len(a__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(a__ )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase ='\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(a__ ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(a__ , 'w' ) as f:
f.write('\n'.join(a__ ) )
def UpperCamelCase__ ( a__=True ):
'''simple docstring'''
_lowerCAmelCase =[]
for root, _, files in os.walk(a__ ):
if "__init__.py" in files:
_lowerCAmelCase =sort_imports(os.path.join(a__ , '__init__.py' ) , check_only=a__ )
if result:
_lowerCAmelCase =[os.path.join(a__ , '__init__.py' )]
if len(a__ ) > 0:
raise ValueError(F'''Would overwrite {len(a__ )} files, run `make style`.''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowercase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 58
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
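    # (editorial) union-by-size: the smaller component is absorbed into the
    # larger one, and set_component then relabels every affected node so that
    # later find_component lookups stay shallow.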
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
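# (editorial) The loop above is Boruvka's algorithm: each round, every
# component selects its cheapest outgoing edge, so the component count at
# least halves per round and only O(log V) rounds of the O(E) edge scan
# are needed.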
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class SCREAMING_SNAKE_CASE ( _lowerCamelCase):
"""simple docstring"""
lowercase : Union[str, Any] = 'unispeech-sat'
def __init__( self , __A=32 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.1 , __A=0.1 , __A=0.02 , __A=1E-5 , __A="group" , __A="gelu" , __A=(512, 512, 512, 512, 512, 512, 512) , __A=(5, 2, 2, 2, 2, 2, 2) , __A=(10, 3, 3, 3, 3, 2, 2) , __A=False , __A=128 , __A=16 , __A=False , __A=True , __A=0.05 , __A=10 , __A=2 , __A=0.0 , __A=10 , __A=0 , __A=320 , __A=2 , __A=0.1 , __A=100 , __A=256 , __A=256 , __A=0.1 , __A="mean" , __A=False , __A=False , __A=256 , __A=(512, 512, 512, 512, 1500) , __A=(5, 3, 3, 1, 1) , __A=(1, 2, 3, 1, 1) , __A=512 , __A=0 , __A=1 , __A=2 , __A=504 , **__A , ) -> List[str]:
super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =feat_extract_norm
_lowerCAmelCase =feat_extract_activation
_lowerCAmelCase =list(A__ )
_lowerCAmelCase =list(A__ )
_lowerCAmelCase =list(A__ )
_lowerCAmelCase =conv_bias
_lowerCAmelCase =num_conv_pos_embeddings
_lowerCAmelCase =num_conv_pos_embedding_groups
_lowerCAmelCase =len(self.conv_dim )
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =feat_proj_dropout
_lowerCAmelCase =final_dropout
_lowerCAmelCase =layerdrop
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =initializer_range
_lowerCAmelCase =vocab_size
_lowerCAmelCase =num_clusters
_lowerCAmelCase =do_stable_layer_norm
_lowerCAmelCase =use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase =apply_spec_augment
_lowerCAmelCase =mask_time_prob
_lowerCAmelCase =mask_time_length
_lowerCAmelCase =mask_time_min_masks
_lowerCAmelCase =mask_feature_prob
_lowerCAmelCase =mask_feature_length
_lowerCAmelCase =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_lowerCAmelCase =num_codevectors_per_group
_lowerCAmelCase =num_codevector_groups
_lowerCAmelCase =contrastive_logits_temperature
_lowerCAmelCase =feat_quantizer_dropout
_lowerCAmelCase =num_negatives
_lowerCAmelCase =codevector_dim
_lowerCAmelCase =proj_codevector_dim
_lowerCAmelCase =diversity_loss_weight
# ctc loss
_lowerCAmelCase =ctc_loss_reduction
_lowerCAmelCase =ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase =list(A__ )
_lowerCAmelCase =list(A__ )
_lowerCAmelCase =list(A__ )
_lowerCAmelCase =xvector_output_dim
@property
def UpperCamelCase__ ( self ) -> Dict:
return functools.reduce(operator.mul , self.conv_stride , 1 )
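    # (editorial) With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this
    # property evaluates to 5 * 2**6 = 320, i.e. one feature frame per 320
    # input samples -- a 50 Hz frame rate for 16 kHz audio.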
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
        return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
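# Editorial note: ``128 + level + (c - 128)`` reduces to ``c + level``, so the
# call below shifts every channel value up by 100; for 8-bit images, PIL's
# ``Image.point`` keeps the results within the 0-255 range.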
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
_lowerCAmelCase =hex_num[0] == '-'
if is_negative:
_lowerCAmelCase =hex_num[1:]
try:
        _lowerCAmelCase =int(__A , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
_lowerCAmelCase =''
while int_num > 0:
_lowerCAmelCase =str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
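# Editorial examples (note the binary digits come back as an ``int``):
#   "AC" -> 10101100      (0xAC == 172 == 0b10101100)
#   "-1" -> -1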
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
        config =BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
        new_config =BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-config-org' , push_to_hub=True , use_auth_token=self._token )
            new_config =BertConfig.from_pretrained('valid_org/test-config-org' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
def UpperCamelCase__ ( self ) -> List[str]:
        CustomConfig.register_for_auto_class()
        config =CustomConfig(attribute=42 )
        config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
        self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
        c =GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd =c.n_embd + 1  # int
        resid_pdrop =c.resid_pdrop + 1.0  # float
        scale_attn_weights =not c.scale_attn_weights  # bool
        summary_type =c.summary_type + 'foo'  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(n_embd , c.n_embd , 'mismatch for key: n_embd' )
        self.assertEqual(resid_pdrop , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
        self.assertEqual(summary_type , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
        base_config =PretrainedConfig()
        missing_keys =[key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        keys_with_defaults =[key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F''' {', '.join(keys_with_defaults )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        config =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
        self.assertIsNotNone(config )
def UpperCamelCase__ ( self ) -> List[str]:
        # A mock response for an HTTP head request to emulate server down
        response_mock =mock.Mock()
        response_mock.status_code =500
        response_mock.headers ={}
        response_mock.raise_for_status.side_effect =HTTPError
        response_mock.json.return_value ={}
        # Download this model to make sure it's in the cache.
        _lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            _lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Check that the fake head request was called.
        mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
        configuration =AutoConfig.from_pretrained('bert-base-cased' )
        configuration.configuration_files =['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size =2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , 'config.4.0.0.json' ) , 'w' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration =AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files =['config.42.0.0.json']
            configuration.hidden_size =768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , 'config.4.0.0.json' ) , os.path.join(tmp_dir , 'config.42.0.0.json' ) )
            new_configuration =AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo ='hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ ='v4.0.0'
        new_configuration , kwargs =new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ ='v3.0.0'
        old_configuration =old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key , default=False ):
    '''simple docstring'''
    try:
        value =os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value =default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value =strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
def skip(test_case ):
    '''simple docstring'''
    return unittest.skip('Test was skipped' )(test_case )
def slow(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(_run_slow_tests , 'test is slow' )(test_case )
def require_cpu(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(test_case )
def require_cuda(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(test_case )
def require_xpu(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(test_case )
def require_mps(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(test_case )
def require_huggingface_suite(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(test_case )
def require_bnb(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(test_case )
def require_tpu(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(test_case )
def require_single_gpu(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(test_case )
def require_single_xpu(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(test_case )
def require_multi_gpu(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(test_case )
def require_multi_xpu(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(test_case )
def require_safetensors(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(test_case )
def require_deepspeed(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(test_case )
def require_fsdp(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(test_case )
def require_torch_min_version(test_case=None , version=None ):
    '''simple docstring'''
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , F'''test requires torch version >= {version}''' )(test_case )
def require_tensorboard(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(test_case )
def require_wandb(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(test_case )
def require_comet_ml(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(test_case )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case ):
    '''simple docstring'''
    return unittest.skipUnless(
        _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(test_case )
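# Added usage sketch: the helpers above are ordinary unittest skip decorators,
# applied directly to test methods, e.g.:
#
#   @slow
#   @require_multi_gpu
#   def test_big_model(self):
#       ...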
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""
    clear_on_setup = True
    @classmethod
    def setUpClass( cls ) -> str:
        cls.tmpdir =tempfile.mkdtemp()
    @classmethod
    def tearDownClass( cls ) -> Any:
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp( self ) -> Optional[Any]:
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('**/*' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
    def tearDown( self ) -> Optional[Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
    def add_mocks( self , mocks ) -> str:
        self.mocks =mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor ):
    '''simple docstring'''
    state =AcceleratorState()
    tensor =tensor[None].clone().to(state.device )
    tensors =gather(tensor ).cpu()
    tensor =tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput :
    """simple docstring"""
    def __init__( self , returncode , stdout , stderr ) -> Any:
        self.returncode =returncode
        self.stdout =stdout
        self.stderr =stderr
async def _read_stream(stream , callback ):
    '''simple docstring'''
    while True:
        line =await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''simple docstring'''
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p =await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out =[]
    err =[]
    def tee(line , sink , pipe , label="" ):
        line =line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=1_8_0 , quiet=False , echo=True ):
    '''simple docstring'''
    loop =asyncio.get_event_loop()
    result =loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str =' '.join(cmd )
    if result.returncode > 0:
        stderr ='\n'.join(result.stderr )
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class SubprocessCallException( Exception):
    """simple docstring"""
    pass
def run_command(command , return_stdout=False ):
    '''simple docstring'''
    try:
        output =subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output =output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'''Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
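# Added usage sketch: both runners take an argv-style list. `run_command` is the
# synchronous path; `execute_subprocess_async` streams stdout/stderr live, which
# is what the distributed launcher tests rely on.
if __name__ == "__main__":
    print(run_command([sys.executable, '-c', "print('ok')"] , return_stdout=True ) )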
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def radix_sort(list_of_ints ):
    '''simple docstring'''
    placement =1
    max_digit =max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets =[[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp =int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a =0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] =i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
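    # Added usage sketch: radix_sort (fixed above) sorts non-negative integers
    # in place and returns the same list object.
    assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66] ) == [2, 24, 45, 66, 75, 90, 170, 802]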
| 58
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
        vocab_tokens =[
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase__ ( self , __A ) -> Any:
        input_text ='UNwant\u00E9d,running'
        output_text ='unwanted, running'
return input_text, output_text
def UpperCamelCase__ ( self ) -> str:
        tokenizer =self.tokenizer_class(self.vocab_file )
        tokens =tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
        tokenizer =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
        tokenizer =BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ) -> List[str]:
        tokenizer =BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCamelCase__ ( self ) -> Optional[int]:
        tokenizer =BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ) -> List[Any]:
        tokenizer =BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
        tokenizer =BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ) -> Dict:
        tokenizer =BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ) -> List[Any]:
        tokenizer =BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase__ ( self ) -> List[str]:
        tokenizer =BasicTokenizer(do_lower_case=False , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCamelCase__ ( self ) -> Optional[Any]:
        vocab_tokens =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab ={}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] =i
        tokenizer =WordpieceTokenizer(vocab=vocab , unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) , [] )
        self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def UpperCamelCase__ ( self ) -> int:
        tokenizer =self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        src_text =['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_ids =[1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
        batch =tokenizer(src_text , padding=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        result =list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_ids , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def UpperCamelCase__ ( self ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCamelCase__ ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCamelCase__ ( self ) -> List[Any]:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def UpperCamelCase__ ( self ) -> Dict:
        tokenizer =self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        text =tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a =tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence =tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair =tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
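# Added note: the two asserts above encode ProphetNet's special-token layout --
# a single [SEP] (id 102) is appended after each sequence and there is no [CLS]
# prefix, unlike BERT.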
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
    is_coloredlogs_available,
    is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_cuda_available,
    is_torch_fx_available,
    is_torch_fx_proxy,
    is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta'''] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta_fast'''] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta'''] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm_roberta'''] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xlm_roberta'''] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
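# Added note: _LazyModule defers the heavy framework imports declared above
# until an attribute is first accessed, e.g. (illustrative):
#
#   import transformers.models.xlm_roberta as xlmr
#   xlmr.XLMRobertaModel  # only now is modeling_xlm_roberta actually imported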
| 704
|
'''simple docstring'''
from __future__ import annotations
def peak(lst ):
    '''simple docstring'''
    m =len(lst ) // 2
    # choose the middle 3 elements
    three =lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
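    # Added usage sketch: `peak` binary-searches a list that strictly increases
    # and then strictly decreases.
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1] ) == 5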
| 58
| 0
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0XE0_00
SEP = 0XE0_01
BOS = 0XE0_02
MASK = 0XE0_03
RESERVED = 0XE0_04
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer):
    """simple docstring"""
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ) -> int:
        bos_token =AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token =AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token =AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token =AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token =AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token =AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints ={}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] =codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings ={
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size =UNICODE_VOCAB_SIZE
        self._num_special_tokens =len(self._special_codepoints )
    @property
    def vocab_size( self ) -> Union[str, Any]:
        return self._unicode_vocab_size
    def _tokenize( self , text ) -> Union[str, Any]:
        return list(text )
    def _convert_token_to_id( self , token ) -> Any:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F'''invalid token: \'{token}\'''' )
    def _convert_id_to_token( self , index ) -> List[str]:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F'''invalid id: {index}''' )
    def convert_tokens_to_string( self , tokens ) -> Any:
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[str]:
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        result =cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[str]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result =[1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[Any]:
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        result =len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Optional[int]:
        return ()
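# Added concept sketch (standalone): CANINE-style tokenization is just
# characters -> Unicode codepoints (ord) and back (chr); the special symbols
# above live in the Private Use Area, so they can never collide with real text.
if __name__ == "__main__":
    _text = 'héllo'
    _ids = [ord(ch ) for ch in _text]
    assert ''.join(chr(i ) for i in _ids ) == _text
    assert all(0 <= i < UNICODE_VOCAB_SIZE for i in _ids )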
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> Union[str, Any]:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class =getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] =do_lower_case
            normalizer_state['strip_accents'] =strip_accents
            normalizer_state['handle_chinese_chars'] =tokenize_chinese_chars
            self.backend_tokenizer.normalizer =normalizer_class(**normalizer_state )
        self.do_lower_case =do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> int:
        output =[self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files =self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
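# Added illustration (standalone): the methods above produce the BERT-style
# layout [CLS] A [SEP] (B [SEP]); segment ids are 0 for A and 1 for B.
if __name__ == "__main__":
    _cls, _sep = [101], [102]  # hypothetical ids, for illustration only
    _a, _b = [7, 8], [9]
    assert len(_cls + _a + _sep ) * [0] + len(_b + _sep ) * [1] == [0, 0, 0, 0, 1, 1]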
| 58
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase_ = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE ( BenchmarkArguments):
    """simple docstring"""
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__( self , **kwargs ) -> Tuple:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg =deprecated_arg[3:]
                kwargs[positive_arg] =not kwargs.pop(deprecated_arg )
                logger.warning(
                    F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name =kwargs.pop('tpu_name' , self.tpu_name )
        self.device_idx =kwargs.pop('device_idx' , self.device_idx )
        self.eager_mode =kwargs.pop('eager_mode' , self.eager_mode )
        self.use_xla =kwargs.pop('use_xla' , self.use_xla )
        super().__init__(**kwargs )
    tpu_name : str = field(
        default=None , metadata={'help': 'Name of TPU'} , )
    device_idx : int = field(
        default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
    eager_mode : bool = field(default=False , metadata={'help': 'Benchmark models in eager model.'})
    use_xla : bool = field(
        default=False , metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        } , )
    @cached_property
    def _setup_tpu( self ) -> Union[str, Any]:
        requires_backends(self , ['tf'] )
        tpu =None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu =tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu =None
        return tpu
    @cached_property
    def _setup_strategy( self ) -> Any:
        requires_backends(self , ['tf'] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy =tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
                strategy =tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , 'GPU' )  # disable GPU
                strategy =tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy
    @property
    def is_tpu( self ) -> int:
        requires_backends(self , ['tf'] )
        return self._setup_tpu is not None
    @property
    def strategy( self ) -> Optional[Any]:
        requires_backends(self , ['tf'] )
        return self._setup_strategy
    @property
    def gpu_list( self ) -> Any:
        requires_backends(self , ['tf'] )
        return tf.config.list_physical_devices('GPU' )
    @property
    def n_gpu( self ) -> List[str]:
        requires_backends(self , ['tf'] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu( self ) -> Union[str, Any]:
        return self.n_gpu > 0
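# Added usage sketch (illustrative, not part of the original file): the resolved
# strategy is a plain tf.distribute strategy, so model building goes under its
# scope, e.g.:
#
#   with args.strategy.scope():          # `args` is an instance of the class above
#       model = tf.keras.Sequential([...])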
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( ProcessorMixin):
"""simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> str:
        feature_extractor =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor =kwargs.pop('feature_extractor' )
        image_processor =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> Optional[int]:
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding =self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features =self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] =image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ) -> Any:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Optional[int]:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> Tuple:
        tokenizer_input_names =self.tokenizer.model_input_names
        image_processor_input_names =self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ) -> Optional[int]:
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ) -> Optional[Any]:
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
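# Added usage sketch (the checkpoint id is the standard CLIP one, shown for
# illustration; requires downloaded weights, so left as comments):
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
#   inputs = processor(text=['a photo of a cat'] , images=image , return_tensors='pt' )
#   # -> BatchEncoding with input_ids, attention_mask and pixel_values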
| 58
| 0
|
'''simple docstring'''
def encrypt(input_string , key ):
    '''simple docstring'''
    temp_grid =[[] for _ in range(key )]
    lowest =key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num =position % (lowest * 2)  # puts it in bounds
        num =min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid =[''.join(row ) for row in temp_grid]
    output_string =''.join(grid )
    return output_string
def decrypt(input_string , key ):
    '''simple docstring'''
    grid =[]
    lowest =key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1:
        return input_string
    temp_grid =[[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num =position % (lowest * 2)  # puts it in bounds
        num =min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append('*' )
    counter =0
    for row in temp_grid:  # fills in the characters
        splice =input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string =''  # reads as zigzag
    for position in range(len(input_string ) ):
        num =position % (lowest * 2)  # puts it in bounds
        num =min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce(input_string ):
    '''simple docstring'''
    results ={}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] =decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
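    # Added round-trip check: encrypting then decrypting with the same key must
    # reproduce the plaintext (rail-fence / zigzag cipher).
    assert decrypt(encrypt('HELLO WORLD' , 4 ) , 4 ) == 'HELLO WORLD'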
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( ModelMixin , ConfigMixin):
"""simple docstring"""
@register_to_config
    def __init__( self , input_dims = 128 , targets_length = 256 , max_decoder_noise_time = 2_000.0 , d_model = 768 , num_layers = 12 , num_heads = 12 , d_kv = 64 , d_ff = 2048 , dropout_rate = 0.1 , ) -> str:
        super().__init__()
        self.conditioning_emb =nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding =nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad =False
        self.continuous_inputs_projection =nn.Linear(input_dims , d_model , bias=False )
        self.dropout =nn.Dropout(p=dropout_rate )
        self.decoders =nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr =DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm =TaLayerNorm(d_model )
        self.post_dropout =nn.Dropout(p=dropout_rate )
        self.spec_out =nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ) -> Any:
        mask =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ) -> Optional[Any]:
        batch , _ , _ =decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps =get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb =self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length =decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions =torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings =self.position_encoding(decoder_positions )
        inputs =self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y =self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask =torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks =[(x, self.encoder_decoder_mask(decoder_mask , y_mask )) for x, y_mask in encodings_and_masks]
        # cross attend style: concat encodings
        encoded =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y =lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y =self.decoder_norm(y )
        y =self.post_dropout(y )
        spec_out =self.spec_out(y )
        return spec_out
class DecoderLayer( nn.Module):
    """simple docstring"""
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ) -> Union[str, Any]:
        super().__init__()
        self.layer =nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ) -> Any:
        hidden_states =self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            hidden_states =self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states =self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module):
    """simple docstring"""
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ) -> Optional[Any]:
        super().__init__()
        self.layer_norm =TaLayerNorm(d_model )
        self.FiLMLayer =TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention =Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout =nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ) -> List[Any]:
        # pre_self_attention_layer_norm
        normed_hidden_states =self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states =self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output =self.attention(normed_hidden_states )
        hidden_states =hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module):
    """simple docstring"""
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ) -> Optional[int]:
        super().__init__()
        self.attention =Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm =TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout =nn.Dropout(dropout_rate )
    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ) -> Tuple:
        normed_hidden_states =self.layer_norm(hidden_states )
        attention_output =self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output =hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond( nn.Module):
    """simple docstring"""
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ) -> Optional[Any]:
        super().__init__()
        self.DenseReluDense =TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film =TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm =TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout =nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None ) -> List[Any]:
        forwarded_states =self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states =self.film(forwarded_states , conditioning_emb )
        forwarded_states =self.DenseReluDense(forwarded_states )
        hidden_states =hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module):
    """simple docstring"""
    def __init__( self , d_model , d_ff , dropout_rate ) -> Union[str, Any]:
        super().__init__()
        self.wi_0 =nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 =nn.Linear(d_model , d_ff , bias=False )
        self.wo =nn.Linear(d_ff , d_model , bias=False )
        self.dropout =nn.Dropout(dropout_rate )
        self.act =NewGELUActivation()
    def forward( self , hidden_states ) -> List[Any]:
        hidden_gelu =self.act(self.wi_0(hidden_states ) )
        hidden_linear =self.wi_1(hidden_states )
        hidden_states =hidden_gelu * hidden_linear
        hidden_states =self.dropout(hidden_states )
        hidden_states =self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module):
    """simple docstring"""
    def __init__( self , hidden_size , eps=1E-6 ) -> int:
        super().__init__()
        self.weight =nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon =eps
    def forward( self , hidden_states ) -> Dict:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance =hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states =hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module):
    """simple docstring"""
    def forward( self , input ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
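# Added sanity sketch: the tanh formula above matches PyTorch's built-in
# tanh-approximate GELU (the `approximate` kwarg needs torch >= 1.12).
if __name__ == "__main__":
    _x = torch.linspace(-3.0 , 3.0 , steps=101 )
    assert torch.allclose(NewGELUActivation()(_x ) , torch.nn.functional.gelu(_x , approximate='tanh' ) , atol=1E-6 )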
class TaFiLMLayer( nn.Module):
    """simple docstring"""
    def __init__( self , in_features , out_features ) -> Optional[Any]:
        super().__init__()
        self.scale_bias =nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ) -> Optional[Any]:
        emb =self.scale_bias(conditioning_emb )
        scale , shift =torch.chunk(emb , 2 , -1 )
        x =x * (1 + scale) + shift
        return x
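# Added usage sketch: FiLM predicts a per-feature (scale, shift) from the
# conditioning embedding and applies x * (1 + scale) + shift.
if __name__ == "__main__":
    _film = TaFiLMLayer(in_features=8 , out_features=4 )
    assert _film(torch.randn(2 , 10 , 4 ) , torch.randn(2 , 1 , 8 ) ).shape == (2, 10, 4)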
| 58
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
    def setUp( self ) -> Union[str, Any]:
        mod_file =inspect.getfile(accelerate.test_utils )
        self.test_file_path =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.data_loop_file_path =os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def UpperCamelCase__ ( self ) -> Optional[int]:
print(F'''Found {torch.cuda.device_count()} devices.''' )
        cmd =['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
print(F'''Found {torch.cuda.device_count()} devices.''' )
        cmd =['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(F'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ) -> str:
        cmd =['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ) -> List[Any]:
print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        cmd =['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ''''''
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
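# Added concept note: pad_across_processes zero-pads each rank's tensor along
# dim 0 up to the longest length across ranks (at the end by default, at the
# front with pad_first=True), which is exactly what the checks above verify.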
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to save the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
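# Hypothetical CLI invocation of the command registered above (paths are placeholders):
#   transformers-cli train --train_data ./train.csv --column_label 0 --column_text 1 \
#       --model bert-base-uncased --output ./trained_model --train_batch_size 32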
| 58
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =b.T
_lowerCAmelCase =np.sum(np.square(__lowerCAmelCase ) , axis=1 )
_lowerCAmelCase =np.sum(np.square(__lowerCAmelCase ) , axis=0 )
_lowerCAmelCase =np.matmul(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase =aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =x.reshape(-1 , 3 )
_lowerCAmelCase =squared_euclidean_distance(__lowerCAmelCase , __lowerCAmelCase )
return np.argmin(__lowerCAmelCase , axis=1 )
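# Minimal sketch of the quantization step above (assumed toy palette of two clusters):
#   clusters = np.array([[0, 0, 0], [255, 255, 255]])
#   pixel = np.array([[[200, 210, 190]]])  # one pixel, shape (1, 1, 3)
#   color_quantize(pixel, clusters)        # -> array([1]): the white cluster is nearest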
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = ['pixel_values']
def __init__( self , __A = None , __A = True , __A = None , __A = PILImageResampling.BILINEAR , __A = True , __A = True , **__A , ) -> int:
super().__init__(**_lowerCAmelCase )
_lowerCAmelCase =size if size is not None else {'height': 256, 'width': 256}
_lowerCAmelCase =get_size_dict(_lowerCAmelCase )
_lowerCAmelCase =np.array(_lowerCAmelCase ) if clusters is not None else None
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =resample
_lowerCAmelCase =do_normalize
_lowerCAmelCase =do_color_quantize
def UpperCamelCase__ ( self , __A , __A , __A = PILImageResampling.BILINEAR , __A = None , **__A , ) -> List[str]:
_lowerCAmelCase =get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
_lowerCAmelCase , size=(size['height'], size['width']) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def UpperCamelCase__ ( self , __A , __A = None , ) -> List[str]:
_lowerCAmelCase =rescale(image=_lowerCAmelCase , scale=1 / 127.5 , data_format=_lowerCAmelCase )
_lowerCAmelCase =image - 1
return image
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> Optional[int]:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(_lowerCAmelCase )
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_lowerCAmelCase =clusters if clusters is not None else self.clusters
_lowerCAmelCase =np.array(_lowerCAmelCase )
_lowerCAmelCase =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase =[self.normalize(image=_lowerCAmelCase ) for image in images]
if do_color_quantize:
_lowerCAmelCase =[to_channel_dimension_format(_lowerCAmelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_lowerCAmelCase =np.array(_lowerCAmelCase )
_lowerCAmelCase =color_quantize(_lowerCAmelCase , _lowerCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_lowerCAmelCase =images.shape[0]
_lowerCAmelCase =images.reshape(_lowerCAmelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_lowerCAmelCase =list(_lowerCAmelCase )
else:
_lowerCAmelCase =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
_lowerCAmelCase ={'input_ids': images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
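# Usage note: _LazyModule defers the heavy framework imports until one of the listed
# symbols is actually accessed, e.g. (assumed package path):
#   from transformers.models.vit_mae import ViTMAEModel  # triggers only the torch branch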
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( ):
'''simple docstring'''
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
lowercase_ = generate_large_matrix()
lowercase_ = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
assert all(row == sorted(lowerCamelCase__ , reverse=lowerCamelCase__ ) for row in grid )
assert all(list(lowerCamelCase__ ) == sorted(lowerCamelCase__ , reverse=lowerCamelCase__ ) for col in zip(*lowerCamelCase__ ) )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =0
_lowerCAmelCase =len(lowerCamelCase__ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_lowerCAmelCase =(left + right) // 2
_lowerCAmelCase =array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_lowerCAmelCase =mid + 1
else:
_lowerCAmelCase =mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowerCamelCase__ )
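# Worked example for the binary search above:
#   find_negative_index([4, 2, 0, -1, -3]) -> 3, the index of the first negative value.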
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =0
_lowerCAmelCase =len(grid[0] )
for i in range(len(lowerCamelCase__ ) ):
_lowerCAmelCase =find_negative_index(grid[i][:bound] )
total += bound
return (len(lowerCamelCase__ ) * len(grid[0] )) - total
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =0
for row in grid:
for i, number in enumerate(lowerCamelCase__ ):
if number < 0:
total += len(lowerCamelCase__ ) - i
break
return total
def UpperCamelCase__ ( ):
'''simple docstring'''
from timeit import timeit
print('Running benchmarks' )
_lowerCAmelCase =(
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_lowerCAmelCase =timeit(F'''{func}(grid=grid)''' , setup=lowerCamelCase__ , number=5_0_0 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # feeds an nn.Sequential with Tanh, so two entries at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # Mesh-TensorFlow stores all experts in one array, so it is split per expert here
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
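# Hypothetical invocation (script name and paths are placeholders):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan.pt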
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =str(_lowerCAmelCase )
return len(_lowerCAmelCase ) == 9 and set(_lowerCAmelCase ) == set('123456789' )
def UpperCamelCase__ ( ):
'''simple docstring'''
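# A 9-pandigital concatenated product of n with (1, 2) writes n followed by 2n, so for a
# 4-digit n it equals n * 100002 (n shifted five places, plus 2n). For a 3-digit n the
# product with (1, 2, 3) is n * 1002003. Both loops below rely on that identity and scan
# candidates from the largest base number down.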
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
_lowerCAmelCase =1_0_0_0_0_2 * base_num
if is_9_pandigital(_lowerCAmelCase ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
_lowerCAmelCase =1_0_0_2_0_0_3 * base_num
if is_9_pandigital(_lowerCAmelCase ):
return candidate
return None
if __name__ == "__main__":
print(F'{solution() = }')
| 711
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
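# e.g. solution(15) -> 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26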
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
lowercase_ = "▁"
# Segments (not really needed)
lowercase_ = 0
lowercase_ = 1
lowercase_ = 2
lowercase_ = 3
lowercase_ = 4
class SCREAMING_SNAKE_CASE ( UpperCamelCase_):
"""simple docstring"""
lowercase : Tuple = VOCAB_FILES_NAMES
lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : str = """left"""
lowercase : Optional[Any] = XLNetTokenizer
def __init__( self , __A=None , __A=None , __A=False , __A=True , __A=False , __A="<s>" , __A="</s>" , __A="<unk>" , __A="<sep>" , __A="<pad>" , __A="<cls>" , __A="<mask>" , __A=["<eop>", "<eod>"] , **__A , ) -> Any:
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
vocab_file=__A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , additional_special_tokens=__A , **__A , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =False if not self.vocab_file else True
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,)
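# Layout sketch for the helpers above (single sequence "A", XLNet convention):
#   tokens:         A ... <sep> <cls>
#   token_type_ids: 0 ...   0     2
# i.e. <cls> comes last (unlike BERT) and carries its own segment id, 2.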
| 712
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
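# Usage sketch (graph as an adjacency dict; every vertex must appear as a key):
#   {0: [1], 1: [2], 2: [0]} -> True  (back edge closes the cycle 0 -> 1 -> 2 -> 0)
#   {0: [1], 1: [2], 2: []}  -> False (a DAG has no back edge)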
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
lowercase_ = 637_8137.0
lowercase_ = 635_6752.31_4245
lowercase_ = 637_8137
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =(AXIS_A - AXIS_B) / AXIS_A
_lowerCAmelCase =atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) )
_lowerCAmelCase =atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) )
_lowerCAmelCase =radians(__UpperCAmelCase )
_lowerCAmelCase =radians(__UpperCAmelCase )
# Equation
_lowerCAmelCase =sin((phi_a - phi_a) / 2 )
_lowerCAmelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
_lowerCAmelCase =sqrt(sin_sq_phi + (cos(__UpperCAmelCase ) * cos(__UpperCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__UpperCAmelCase )
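# Rough sanity check (coordinates in degrees; reference points assumed):
#   San Francisco (37.774856, -122.424227) to Yosemite (37.864742, -119.537521)
#   should come out near 254 km with this ellipsoidal (Lambert-style) formula.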
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. Initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
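# Minimal usage sketch (public names assumed): instantiating the composite config with no
# arguments builds default vision, q-former and OPT text sub-configs; the classmethod above
# stitches pre-built sub-configs together, and to_dict() serializes the whole composite.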
| 58
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : Optional[Any] = LxmertTokenizer
lowercase : List[Any] = LxmertTokenizerFast
lowercase : List[Any] = True
lowercase : List[str] = True
def UpperCamelCase__ ( self ) -> Any:
super().setUp()
_lowerCAmelCase =[
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase__ ( self , __A ) -> Union[str, Any]:
_lowerCAmelCase ='UNwant\u00E9d,running'
_lowerCAmelCase ='unwanted, running'
return input_text, output_text
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.tokenizer_class(self.vocab_file )
_lowerCAmelCase =tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__A , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase__ ( self ) -> Dict:
if not self.test_rust_tokenizer:
return
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase ='I was born in 92000, and this is falsé.'
_lowerCAmelCase =tokenizer.tokenize(__A )
_lowerCAmelCase =rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
_lowerCAmelCase =tokenizer.encode(__A , add_special_tokens=__A )
_lowerCAmelCase =rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase =tokenizer.encode(__A )
_lowerCAmelCase =rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
| 714
|
'''simple docstring'''
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def UpperCamelCase__ ( a__ ) -> Optional[int]:
'''simple docstring'''
if len(__A ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase =MinHash(num_perm=__A )
for token in set(__A ):
min_hash.update(token.encode() )
return min_hash
def UpperCamelCase__ ( a__ ) -> Dict:
'''simple docstring'''
return {t for t in NON_ALPHA.split(__A ) if len(t.strip() ) > 0}
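# e.g. get_tokens('def foo(x):') -> {'def', 'foo', 'x'}; empty splits are filtered out.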
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , * , __A = 0.85 , ) -> List[Any]:
_lowerCAmelCase =duplication_jaccard_threshold
_lowerCAmelCase =NUM_PERM
_lowerCAmelCase =MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase =defaultdict(__A )
def UpperCamelCase__ ( self , __A , __A ) -> None:
_lowerCAmelCase =self._index.query(__A )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(__A , __A )
if len(__A ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__A )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__A )
def UpperCamelCase__ ( self ) -> List[List[Dict]]:
_lowerCAmelCase =[]
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase =[base] + list(__A )
# reformat the cluster to be a list of dict
_lowerCAmelCase =[{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
duplicate_clusters.append(__A )
return duplicate_clusters
def UpperCamelCase__ ( self , __A ) -> None:
_lowerCAmelCase =self.get_duplicate_clusters()
with open(__A , 'w' ) as f:
json.dump(__A , __A )
def UpperCamelCase__ ( a__ ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase =element
_lowerCAmelCase =get_min_hash([t for t in NON_ALPHA.split(data['content'] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCamelCase__ ( a__ ) -> Any:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__A , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def UpperCamelCase__ ( a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase =DuplicationIndex(duplication_jaccard_threshold=__A )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__A ) ) , max_queue_size=1_0_0 ) ):
di.add(__A , __A )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCamelCase__ ( a__ , a__ ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase =get_tokens(__A )
_lowerCAmelCase =get_tokens(__A )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowercase_ = None
def UpperCamelCase__ ( a__ , a__ ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase =[]
for elementa in cluster:
_lowerCAmelCase =_shared_dataset[elementa['base_index']]['content']
for elementa in extremes:
_lowerCAmelCase =_shared_dataset[elementa['base_index']]['content']
if jaccard_similarity(__A , __A ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase =1
extremes.append(__A )
return extremes
def UpperCamelCase__ ( a__ , a__ , a__ ) -> Tuple:
'''simple docstring'''
global _shared_dataset
_lowerCAmelCase =dataset
_lowerCAmelCase =[]
_lowerCAmelCase =partial(_find_cluster_extremes_shared , jaccard_threshold=__A )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__A , __A , ) , total=len(__A ) , ):
extremes_list.append(__A )
return extremes_list
def UpperCamelCase__ ( a__ , a__ = 0.85 ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase =make_duplicate_clusters(__A , __A )
_lowerCAmelCase ={x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase ={}
_lowerCAmelCase =find_extremes(__A , __A , __A )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase =element
_lowerCAmelCase =duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase =dataset.filter(lambda a__ , idx : idx not in remove_indices , with_indices=__A )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase =element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase =extreme_dict[element["""base_index"""]]["""copies"""]
print(F'''Original dataset size: {len(__A )}''' )
print(F'''Number of duplicate clusters: {len(__A )}''' )
print(F'''Files in duplicate cluster: {len(__A )}''' )
print(F'''Unique files in duplicate cluster: {len(__A )}''' )
print(F'''Filtered dataset size: {len(__A )}''' )
return ds_filter, duplicate_clusters
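# End-to-end sketch (top-level name assumed; hypothetical dataset with 'content',
# 'repo_name' and 'path' columns):
#   ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
# keeps one "extreme" file per near-duplicate MinHash cluster and annotates copy counts.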
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
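# Export sketch: for the default task the ONNX graph declares two inputs, input_ids and
# attention_mask, each with dynamic axes {0: 'batch', 1: 'sequence'}; the multiple-choice
# task adds a 'choice' axis at position 1.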
| 58
| 0
|
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = '''PoolFormerConfig'''
# Base docstring
lowercase_ = '''sail/poolformer_s12'''
lowercase_ = [1, 512, 7, 7]
# Image classification docstring
lowercase_ = '''sail/poolformer_s12'''
lowercase_ = '''tabby, tabby cat'''
lowercase_ = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCamelCase__ ( a__ , a__ = 0.0 , a__ = False ):
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
_lowerCAmelCase =1 - drop_prob
_lowerCAmelCase =(input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_lowerCAmelCase =keep_prob + torch.rand(_lowerCamelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_lowerCAmelCase =input.div(_lowerCamelCase ) * random_tensor
return output
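# Stochastic depth intuition: during training each sample keeps its residual branch with
# probability 1 - drop_prob and is rescaled by keep_prob so the expected activation is
# unchanged; at evaluation time the function reduces to the identity.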
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A = None ) -> str:
super().__init__()
_lowerCAmelCase =drop_prob
def UpperCamelCase__ ( self , __A ) -> int:
return drop_path(_A , self.drop_prob , self.training )
def UpperCamelCase__ ( self ) -> Tuple:
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=None ) -> Dict:
super().__init__()
_lowerCAmelCase =patch_size if isinstance(_A , collections.abc.Iterable ) else (patch_size, patch_size)
_lowerCAmelCase =stride if isinstance(_A , collections.abc.Iterable ) else (stride, stride)
_lowerCAmelCase =padding if isinstance(_A , collections.abc.Iterable ) else (padding, padding)
_lowerCAmelCase =nn.Convad(_A , _A , kernel_size=_A , stride=_A , padding=_A )
_lowerCAmelCase =norm_layer(_A ) if norm_layer else nn.Identity()
def UpperCamelCase__ ( self , __A ) -> Optional[int]:
_lowerCAmelCase =self.projection(_A )
_lowerCAmelCase =self.norm(_A )
return embeddings
class SCREAMING_SNAKE_CASE ( nn.GroupNorm):
"""simple docstring"""
def __init__( self , __A , **__A ) -> str:
super().__init__(1 , _A , **_A )
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A ) -> List[Any]:
super().__init__()
_lowerCAmelCase =nn.AvgPoolad(_A , stride=1 , padding=pool_size // 2 , count_include_pad=_A )
def UpperCamelCase__ ( self , __A ) -> str:
return self.pool(_A ) - hidden_states
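# Note: subtracting the input turns average pooling into a residual-style token mixer;
# the skip connection added in the surrounding layer restores the plain pooled value.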
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Tuple:
super().__init__()
_lowerCAmelCase =nn.Convad(_A , _A , 1 )
_lowerCAmelCase =nn.Convad(_A , _A , 1 )
_lowerCAmelCase =PoolFormerDropPath(_A )
if isinstance(config.hidden_act , _A ):
_lowerCAmelCase =ACTaFN[config.hidden_act]
else:
_lowerCAmelCase =config.hidden_act
def UpperCamelCase__ ( self , __A ) -> Tuple:
_lowerCAmelCase =self.conva(_A )
_lowerCAmelCase =self.act_fn(_A )
_lowerCAmelCase =self.drop(_A )
_lowerCAmelCase =self.conva(_A )
_lowerCAmelCase =self.drop(_A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A ) -> str:
super().__init__()
_lowerCAmelCase =PoolFormerPooling(_A )
_lowerCAmelCase =PoolFormerOutput(_A , _A , _A , _A )
_lowerCAmelCase =PoolFormerGroupNorm(_A )
_lowerCAmelCase =PoolFormerGroupNorm(_A )
# Useful for training neural nets
_lowerCAmelCase =PoolFormerDropPath(_A ) if drop_path > 0.0 else nn.Identity()
_lowerCAmelCase =config.use_layer_scale
if config.use_layer_scale:
_lowerCAmelCase =nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
_lowerCAmelCase =nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
def UpperCamelCase__ ( self , __A ) -> Optional[int]:
if self.use_layer_scale:
_lowerCAmelCase =self.pooling(self.before_norm(_A ) )
_lowerCAmelCase =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_lowerCAmelCase =hidden_states + self.drop_path(_A )
_lowerCAmelCase =()
_lowerCAmelCase =self.output(self.after_norm(_A ) )
_lowerCAmelCase =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_lowerCAmelCase =hidden_states + self.drop_path(_A )
_lowerCAmelCase =(output,) + outputs
return outputs
else:
_lowerCAmelCase =self.drop_path(self.pooling(self.before_norm(_A ) ) )
# First residual connection
_lowerCAmelCase =pooling_output + hidden_states
_lowerCAmelCase =()
# Second residual connection inside the PoolFormerOutput block
_lowerCAmelCase =self.drop_path(self.output(self.after_norm(_A ) ) )
_lowerCAmelCase =hidden_states + layer_output
_lowerCAmelCase =(output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A ) -> Dict:
super().__init__()
_lowerCAmelCase =config
# stochastic depth decay rule
_lowerCAmelCase =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_lowerCAmelCase =[]
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_lowerCAmelCase =nn.ModuleList(_A )
# Transformer blocks
_lowerCAmelCase =[]
_lowerCAmelCase =0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_lowerCAmelCase =[]
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_A ) )
_lowerCAmelCase =nn.ModuleList(_A )
def UpperCamelCase__ ( self , __A , __A=False , __A=True ) -> Optional[int]:
_lowerCAmelCase =() if output_hidden_states else None
_lowerCAmelCase =pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_lowerCAmelCase =layers
# Get patch embeddings from hidden_states
_lowerCAmelCase =embedding_layer(_A )
# Send the embeddings through the blocks
for _, blk in enumerate(_A ):
_lowerCAmelCase =blk(_A )
_lowerCAmelCase =layer_outputs[0]
if output_hidden_states:
_lowerCAmelCase =all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
class SCREAMING_SNAKE_CASE ( a__):
"""simple docstring"""
lowercase : Dict = PoolFormerConfig
lowercase : int = 'poolformer'
lowercase : Tuple = 'pixel_values'
lowercase : Optional[Any] = True
def UpperCamelCase__ ( self , __A ) -> str:
if isinstance(_A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def UpperCamelCase__ ( self , __A , __A=False ) -> Any:
if isinstance(_A , _A ):
_lowerCAmelCase =value
lowercase_ = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowercase_ = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , a__ , )
class SCREAMING_SNAKE_CASE ( a__):
"""simple docstring"""
def __init__( self , __A ) -> List[Any]:
super().__init__(_A )
_lowerCAmelCase =config
_lowerCAmelCase =PoolFormerEncoder(_A )
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase__ ( self ) -> Tuple:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase__ ( self , __A = None , __A = None , __A = None , ) -> List[str]:
_lowerCAmelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase =return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_lowerCAmelCase =self.encoder(
_A , output_hidden_states=_A , return_dict=_A , )
_lowerCAmelCase =encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_A , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(config.hidden_size , config.hidden_size )
def UpperCamelCase__ ( self , __A ) -> Optional[int]:
_lowerCAmelCase =self.dense(_A )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , a__ , )
class SCREAMING_SNAKE_CASE ( a__):
"""simple docstring"""
def __init__( self , __A ) -> List[Any]:
super().__init__(_A )
_lowerCAmelCase =config.num_labels
_lowerCAmelCase =PoolFormerModel(_A )
# Final norm
_lowerCAmelCase =PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_lowerCAmelCase =(
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase__ ( self , __A = None , __A = None , __A = None , __A = None , ) -> List[str]:
_lowerCAmelCase =return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase =self.poolformer(
_A , output_hidden_states=_A , return_dict=_A , )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.classifier(self.norm(_A ).mean([-2, -1] ) )
_lowerCAmelCase =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase ='regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase ='single_label_classification'
else:
_lowerCAmelCase ='multi_label_classification'
if self.config.problem_type == "regression":
_lowerCAmelCase =MSELoss()
if self.num_labels == 1:
_lowerCAmelCase =loss_fct(logits.squeeze() , labels.squeeze() )
else:
_lowerCAmelCase =loss_fct(_A , _A )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase =CrossEntropyLoss()
_lowerCAmelCase =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase =BCEWithLogitsLoss()
_lowerCAmelCase =loss_fct(_A , _A )
if not return_dict:
_lowerCAmelCase =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# text-to-image (if)
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
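# explicitly pin AttnAddedKVProcessor on both stage UNets; assumption: this keeps
# attention behavior deterministic when combined with model CPU offload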
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
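# reset CUDA allocator stats so torch.cuda.max_memory_allocated() below measures only this pipeline call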
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
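# free cached blocks and reset the peak counters so subsequent max_memory_allocated() readings start from zero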
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , __A , __A = True , __A = None , __A = 32 , __A = True , __A = 1 / 255 , __A = True , __A = True , __A = [0.48_145_466, 0.4_578_275, 0.40_821_073] , __A = [0.26_862_954, 0.26_130_258, 0.27_577_711] , __A = True , __A=7 , __A=30 , __A=400 , __A=3 , ) -> Optional[int]:
_lowerCAmelCase =parent
_lowerCAmelCase =do_resize
_lowerCAmelCase =size if size is not None else {"shortest_edge": 288}
_lowerCAmelCase =size_divisor
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =image_mean
_lowerCAmelCase =image_std
_lowerCAmelCase =do_pad
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =min_resolution
_lowerCAmelCase =max_resolution
def UpperCamelCase__ ( self ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCamelCase__ ( self , __A , __A=False ) -> Union[str, Any]:
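# compute the height/width BridgeTowerImageProcessor should produce: scale the short side
# to `size`, cap the long side at 1333/800 * size, then snap both down to a multiple of size_divisor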
if not batched:
_lowerCAmelCase =self.size["shortest_edge"]
_lowerCAmelCase =image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
_lowerCAmelCase , _lowerCAmelCase =image.size
else:
_lowerCAmelCase , _lowerCAmelCase =image.shape[1], image.shape[2]
_lowerCAmelCase =size / min(__lowerCamelCase , __lowerCamelCase )
if h < w:
_lowerCAmelCase , _lowerCAmelCase =size, scale * w
else:
_lowerCAmelCase , _lowerCAmelCase =scale * h, size
_lowerCAmelCase =int((1333 / 800) * size )
if max(__lowerCamelCase , __lowerCamelCase ) > max_size:
_lowerCAmelCase =max_size / max(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase =newh * scale
_lowerCAmelCase =neww * scale
_lowerCAmelCase , _lowerCAmelCase =int(newh + 0.5 ), int(neww + 0.5 )
_lowerCAmelCase , _lowerCAmelCase =(
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_lowerCAmelCase =[]
for image in image_inputs:
_lowerCAmelCase =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCAmelCase =max(__lowerCamelCase , key=lambda __A : item[0] )[0]
_lowerCAmelCase =max(__lowerCamelCase , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( _A , unittest.TestCase):
"""simple docstring"""
lowercase : str = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =BridgeTowerImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'image_std' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'size' ) )
self.assertTrue(hasattr(__lowerCamelCase , 'size_divisor' ) )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
def UpperCamelCase__ ( self ) -> Optional[Any]:
# Initialize image processor
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase =self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase =image_processing(__lowerCamelCase , return_tensors='pt' ).pixel_values
_lowerCAmelCase =self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
# Initialize image processor
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase =self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase =image_processing(__lowerCamelCase , return_tensors='pt' ).pixel_values
_lowerCAmelCase =self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self ) -> str:
# Initialize image processor
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCAmelCase =self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase =image_processing(__lowerCamelCase , return_tensors='pt' ).pixel_values
_lowerCAmelCase =self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
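# capacity 3, weights [3, 2, 1], values [1, 2, 3]: taking the weight-2 and weight-1 items gives 2 + 3 = 5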
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
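# A minimal sketch of the 0/1 knapsack recursion these tests exercise, assuming a
# knapsack.knapsack(capacity, weights, values, counter) signature (hypothetical
# reference for the reader, not the module's actual source):
#
# def knapsack(capacity, weights, values, counter):
#     if counter == 0 or capacity == 0:
#         return 0
#     if weights[counter - 1] > capacity:
#         # item does not fit: skip it
#         return knapsack(capacity, weights, values, counter - 1)
#     # best of taking vs. skipping the item
#     return max(
#         values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
#         knapsack(capacity, weights, values, counter - 1),
#     )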
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
"configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
"configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
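# _import_structure maps each submodule to its public names; _LazyModule below defers
# the actual (torch/vision-heavy) imports until an attribute is first accessed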
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MaskFormerFeatureExtractor"]
lowercase_ = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"MaskFormerForInstanceSegmentation",
"MaskFormerModel",
"MaskFormerPreTrainedModel",
]
lowercase_ = [
"MaskFormerSwinBackbone",
"MaskFormerSwinModel",
"MaskFormerSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 718
|
'''simple docstring'''
lowercase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=99 , __A=13 , __A=7 , __A=9 , __A=True , __A=True , __A=False , __A=32 , __A=5 , __A=4 , __A=37 , __A=8 , __A=0.1 , __A=0.002 , __A=1 , __A=0 , __A=0 , __A=None , __A=None , ) -> List[Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =encoder_seq_length
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =d_ff
_lowerCAmelCase =relative_attention_num_buckets
_lowerCAmelCase =dropout_rate
_lowerCAmelCase =initializer_factor
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =None
_lowerCAmelCase =decoder_layers
def UpperCamelCase__ ( self ) -> Union[str, Any]:
return TaConfig.from_pretrained('google/umt5-base' )
def UpperCamelCase__ ( self , __A , __A , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> int:
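# build the model kwargs, filling in defaults for anything not supplied: attention
# masks derived from pad tokens, head masks of all ones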
if attention_mask is None:
_lowerCAmelCase =input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowerCAmelCase =decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowerCAmelCase =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_lowerCAmelCase =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_lowerCAmelCase =torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowerCAmelCase =input_ids.clamp(self.pad_token_id + 1 )
_lowerCAmelCase =decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =config.num_attention_heads
_lowerCAmelCase =self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase__ ( self ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def UpperCamelCase__ ( self ) -> Dict:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , ) -> List[str]:
_lowerCAmelCase =UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_lowerCAmelCase =model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_lowerCAmelCase =model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_lowerCAmelCase =result.last_hidden_state
_lowerCAmelCase =result.past_key_values
_lowerCAmelCase =result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , ) -> str:
_lowerCAmelCase =UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_lowerCAmelCase =model(__UpperCamelCase , use_cache=__UpperCamelCase )
_lowerCAmelCase =model(__UpperCamelCase )
_lowerCAmelCase =model(__UpperCamelCase , use_cache=__UpperCamelCase )
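# with use_cache=True the decoder returns one extra element (the past key values)
# compared to the cache-less forward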
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_lowerCAmelCase , _lowerCAmelCase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCamelCase )['last_hidden_state']
_lowerCAmelCase =model(__UpperCamelCase , past_key_values=__UpperCamelCase )['last_hidden_state']
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, -1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
def UpperCamelCase__ ( self , __A , __A , ) -> List[str]:
_lowerCAmelCase =UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_lowerCAmelCase =model(**__UpperCamelCase )['last_hidden_state']
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
lowercase : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowercase : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowercase : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowercase : Any = True
lowercase : Optional[int] = False
lowercase : Any = False
lowercase : Optional[int] = True
lowercase : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowercase : int = [0.8, 0.9]
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase =UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=__UpperCamelCase , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase =config_and_inputs[0]
_lowerCAmelCase =UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_lowerCAmelCase ={
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
_lowerCAmelCase ={name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_lowerCAmelCase =torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_lowerCAmelCase =model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_lowerCAmelCase =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_lowerCAmelCase =AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_lowerCAmelCase =[
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
_lowerCAmelCase =tokenizer(__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase ).input_ids
# fmt: off
_lowerCAmelCase =torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =model.generate(input_ids.to(__UpperCamelCase ) )
_lowerCAmelCase =[
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
_lowerCAmelCase =tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
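# run finetune.py's main() in-process by patching sys.argv with the assembled command line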
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# guards against the model hanging on generate (e.g. a bad config was saved)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=30 , __A=2 , __A=3 , __A=True , __A=True , __A=32 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=10 , __A=0.02 , __A=3 , __A=0.6 , __A=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =mask_ratio
_lowerCAmelCase =scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCAmelCase =(image_size // patch_size) ** 2
_lowerCAmelCase =int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> Optional[Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCamelCase__ ( self , __A , __A , __A ) -> int:
_lowerCAmelCase =TFViTMAEModel(config=snake_case_ )
_lowerCAmelCase =model(snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __A , __A , __A ) -> str:
_lowerCAmelCase =TFViTMAEForPreTraining(snake_case_ )
_lowerCAmelCase =model(snake_case_ , training=snake_case_ )
# expected sequence length = num_patches
_lowerCAmelCase =(self.image_size // self.patch_size) ** 2
_lowerCAmelCase =self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCAmelCase =1
_lowerCAmelCase =TFViTMAEForPreTraining(snake_case_ )
_lowerCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase =model(snake_case_ , training=snake_case_ )
_lowerCAmelCase =self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =config_and_inputs
_lowerCAmelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase):
"""simple docstring"""
lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase : List[str] = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase : List[Any] = False
lowercase : str = False
lowercase : Dict = False
lowercase : Dict = False
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =TFViTMAEModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , tf.keras.layers.Layer ) )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(snake_case_ )
_lowerCAmelCase =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def UpperCamelCase__ ( self ) -> str:
# make the mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
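# ViTMAE draws a fresh random mask every forward pass; passing the same explicit noise
# to both calls below makes their outputs comparable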
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(snake_case_ )
_lowerCAmelCase =self._prepare_for_class(snake_case_ , snake_case_ )
_lowerCAmelCase =model(snake_case_ , noise=snake_case_ )
_lowerCAmelCase =copy.deepcopy(self._prepare_for_class(snake_case_ , snake_case_ ) )
_lowerCAmelCase =model(**snake_case_ , noise=snake_case_ )
_lowerCAmelCase =outputs_dict[0].numpy()
_lowerCAmelCase =outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCamelCase__ ( self ) -> str:
# make the mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A ):
_lowerCAmelCase ={}
for k, v in inputs_dict.items():
if tf.is_tensor(snake_case_ ):
_lowerCAmelCase =v.numpy()
else:
_lowerCAmelCase =np.array(snake_case_ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(snake_case_ )
_lowerCAmelCase =self._prepare_for_class(snake_case_ , snake_case_ )
_lowerCAmelCase =prepare_numpy_arrays(snake_case_ )
_lowerCAmelCase =model(snake_case_ , noise=snake_case_ )
_lowerCAmelCase =model(**snake_case_ , noise=snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Any:
# make masks reproducible
np.random.seed(2 )
_lowerCAmelCase =int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCAmelCase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCAmelCase =tf.constant(snake_case_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCAmelCase =tf_noise
super().check_pt_tf_models(snake_case_ , snake_case_ , snake_case_ )
def UpperCamelCase__ ( self ) -> int:
# make mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
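# collect every keras-serializable *MainLayer class that corresponds to one of the
# model classes under test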
_lowerCAmelCase ={
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(snake_case_ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(snake_case_ , snake_case_ ),)
if isinstance(snake_case_ , snake_case_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(snake_case_ , '_keras_serializable' , snake_case_ )
}
_lowerCAmelCase =int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCAmelCase =tf.convert_to_tensor(snake_case_ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCAmelCase =main_layer_class(snake_case_ )
_lowerCAmelCase ={
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCAmelCase =tf.keras.Model(snake_case_ , outputs=main_layer(snake_case_ ) )
_lowerCAmelCase =model(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase =os.path.join(snake_case_ , 'keras_model.h5' )
model.save(snake_case_ )
_lowerCAmelCase =tf.keras.models.load_model(
snake_case_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(snake_case_ , tf.keras.Model )
_lowerCAmelCase =model(snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
@slow
def UpperCamelCase__ ( self ) -> Any:
# make mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(snake_case_ )
_lowerCAmelCase =self._prepare_for_class(snake_case_ , snake_case_ )
_lowerCAmelCase =model(snake_case_ , noise=snake_case_ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCAmelCase =outputs.last_hidden_state.numpy()
_lowerCAmelCase =0
else:
_lowerCAmelCase =outputs.logits.numpy()
_lowerCAmelCase =0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
_lowerCAmelCase =model_class.from_pretrained(snake_case_ )
_lowerCAmelCase =model(snake_case_ , noise=snake_case_ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCAmelCase =after_outputs['last_hidden_state'].numpy()
_lowerCAmelCase =0
else:
_lowerCAmelCase =after_outputs['logits'].numpy()
_lowerCAmelCase =0
_lowerCAmelCase =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1E-5 )
def UpperCamelCase__ ( self ) -> Optional[Any]:
# make mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase =np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(snake_case_ )
_lowerCAmelCase =self._prepare_for_class(snake_case_ , snake_case_ )
_lowerCAmelCase =model(snake_case_ , noise=snake_case_ )
_lowerCAmelCase =model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(snake_case_ )
_lowerCAmelCase =model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCAmelCase =model_class.from_config(model.config )
_lowerCAmelCase =new_model(snake_case_ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCAmelCase =new_model(snake_case_ , noise=snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(snake_case_ )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ) -> int:
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ) -> Dict:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCAmelCase =TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=snake_case_ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCAmelCase =ViTMAEConfig()
_lowerCAmelCase =int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCAmelCase =np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCAmelCase =model(**snake_case_ , noise=snake_case_ )
# verify the logits
_lowerCAmelCase =tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase =tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case_ , atol=1E-4 )
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
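# token_type_ids: distilbert/bart take none at all; bert/xlnet/albert use segment ids,
# every other architecture gets None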
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
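# A hedged usage sketch, not part of the original script; the script name and
# the generic flags (--model_name_or_path, --data_dir, --do_predict) are
# assumptions, while --task and --gpus are registered above:
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --gpus 1 --do_predict
#
# When --output_dir is omitted, main() creates a timestamped folder under
# ./results before training, as handled above.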
| 58
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=4 , ) -> Union[str, Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =num_choices
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =None
if self.use_token_type_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase =config_and_inputs
_lowerCAmelCase =True
_lowerCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
"""simple docstring"""
lowercase : int = True
lowercase : int = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCamelCase__ ( self ) -> Tuple:
for model_class_name in self.all_model_classes:
_lowerCAmelCase =model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_lowercase )
_lowerCAmelCase =model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_lowercase )
_lowerCAmelCase =np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
_lowerCAmelCase =model(_lowercase )[0]
_lowerCAmelCase =[1, 11, 5_0265]
self.assertEqual(list(output.shape ) , _lowercase )
# compare the actual values for a slice.
_lowerCAmelCase =np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_lowercase )
_lowerCAmelCase =np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
_lowerCAmelCase =model(_lowercase )[0]
# compare the actual values for a slice.
_lowerCAmelCase =np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
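    # A hedged usage sketch (node and weight values are illustrative; Graph,
    # add_edge and boruvka stand in for the obfuscated identifiers above):
    #
    #   >>> g = Graph(4)
    #   >>> g.add_edge(0, 1, 10)
    #   >>> g.add_edge(0, 2, 6)
    #   >>> g.add_edge(0, 3, 5)
    #   >>> g.add_edge(1, 3, 15)
    #   >>> g.add_edge(2, 3, 4)
    #   >>> g.boruvka()   # prints each added edge and a total weight of 19
    #
    # Boruvka's algorithm repeatedly takes the cheapest edge leaving each
    # component, here selecting (2-3), (0-3) and (0-1) for an MST of weight 19.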
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ , a__=False ):
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ) and isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase =len(set_a.intersection(snake_case__ ) )
if alternative_union:
_lowerCAmelCase =len(snake_case__ ) + len(snake_case__ )
else:
_lowerCAmelCase =len(set_a.union(snake_case__ ) )
return intersection / union
if isinstance(snake_case__ , (list, tuple) ) and isinstance(snake_case__ , (list, tuple) ):
_lowerCAmelCase =[element for element in set_a if element in set_b]
if alternative_union:
_lowerCAmelCase =len(snake_case__ ) + len(snake_case__ )
return len(snake_case__ ) / union
else:
_lowerCAmelCase =set_a + [element for element in set_b if element not in set_a]
return len(snake_case__ ) / len(snake_case__ )
return None
if __name__ == "__main__":
lowercase_ = {'''a''', '''b''', '''c''', '''d''', '''e'''}
lowercase_ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
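    # Expected output (worked by hand): the intersection {'c', 'd', 'e'} has
    # size 3 and the union has size 8, so the printed similarity is 3 / 8 = 0.375.
    # With alternative_union=True the denominator would instead be
    # len(set_a) + len(set_b) = 11, giving 3 / 11 ~= 0.273.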
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
    def brightness(c ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
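# Note (hedged): 128 + level + (c - 128) simplifies to c + level, i.e. every
# channel is shifted uniformly by `level`; the guard above only rejects levels
# outside [-255.0, 255.0]. A usage sketch with assumed pre-obfuscation names:
#
#   >>> brightened = change_brightness(img, 100)   # each channel raised by 100
#   >>> darkened = change_brightness(img, -100)    # each channel lowered by 100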
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> List[Any]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__A ):
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
_lowerCAmelCase =FlaxAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__A ):
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
_lowerCAmelCase =FlaxAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__A )
_lowerCAmelCase =FlaxBertModel.from_pretrained(__A )
_lowerCAmelCase =tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__A ):
return model(**__A )
eval(**__A ).block_until_ready()
@slow
def UpperCamelCase__ ( self ) -> Tuple:
for model_name in ["roberta-base", "roberta-large"]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__A )
_lowerCAmelCase =FlaxRobertaModel.from_pretrained(__A )
_lowerCAmelCase =tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__A ):
return model(**__A )
eval(**__A ).block_until_ready()
def UpperCamelCase__ ( self ) -> List[str]:
with self.assertRaisesRegex(
__A , 'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase__ ( self ) -> Dict:
with self.assertRaisesRegex(
__A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained(__A , revision='aaaaaa' )
def UpperCamelCase__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
__A , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase__ ( self ) -> List[Any]:
with self.assertRaisesRegex(__A , 'Use `from_pt=True` to load this model' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
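    # Note (hedged): the last test reflects that 'hf-internal-testing/tiny-bert-pt-only'
    # ships only PyTorch weights; loading it into Flax needs the on-the-fly
    # conversion path, e.g. FlaxAutoModel.from_pretrained(model_id, from_pt=True).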
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
        self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
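        # Note (hedged): update_from_string parses the comma-separated "key=value"
        # string and casts each value to the type of the existing attribute, which
        # is why the int/float/bool/str round-trips above compare equal afterwards.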
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase__ ( a__ , a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =StableDiffusionPipeline.from_pretrained(a__ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
_lowerCAmelCase =load_file(a__ )
_lowerCAmelCase =[]
# directly update weight in diffusers model
for key in state_dict:
        # it is suggested to print out the key; it will usually look like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # the alpha has been applied beforehand, so just skip these entries
if ".alpha" in key or key in visited:
continue
if "text" in key:
_lowerCAmelCase =key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
_lowerCAmelCase =pipeline.text_encoder
else:
_lowerCAmelCase =key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
_lowerCAmelCase =pipeline.unet
# find the target layer
_lowerCAmelCase =layer_infos.pop(0 )
while len(a__ ) > -1:
try:
_lowerCAmelCase =curr_layer.__getattr__(a__ )
if len(a__ ) > 0:
_lowerCAmelCase =layer_infos.pop(0 )
elif len(a__ ) == 0:
break
except Exception:
if len(a__ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
_lowerCAmelCase =layer_infos.pop(0 )
_lowerCAmelCase =[]
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(a__ )
else:
pair_keys.append(a__ )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
_lowerCAmelCase =state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
_lowerCAmelCase =state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a__ , a__ ).unsqueeze(2 ).unsqueeze(3 )
else:
_lowerCAmelCase =state_dict[pair_keys[0]].to(torch.floataa )
_lowerCAmelCase =state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a__ , a__ )
# update visited list
for item in pair_keys:
visited.append(a__ )
return pipeline
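# Note (hedged): per LoRA pair the loop above applies the merge
#     W <- W0 + alpha * (weight_up @ weight_down)
# where weight_up is (out_features, r) and weight_down is (r, in_features) for an
# illustrative rank r; for 4-D conv kernels the trailing singleton spatial dims
# are squeezed before torch.mm and restored with unsqueeze afterwards.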
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
lowercase_ = parser.parse_args()
lowercase_ = args.base_model_path
lowercase_ = args.checkpoint_path
lowercase_ = args.dump_path
lowercase_ = args.lora_prefix_unet
lowercase_ = args.lora_prefix_text_encoder
lowercase_ = args.alpha
lowercase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowercase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
        # put each bucket's contents into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
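# A hedged usage sketch under the assumed pre-obfuscation name radix_sort; each
# pass buckets the numbers by one decimal digit (RADIX = 10), least significant
# digit first:
#
#   >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   [2, 24, 45, 66, 75, 90, 170, 802]
#
# Three passes are needed here because max(list_of_ints) = 802 has three digits.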
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
import pytest
lowercase_ = """__dummy_dataset1__"""
lowercase_ = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def UpperCamelCase__ ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCamelCase__ ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =dataset_loading_script_name
_lowerCAmelCase =tmp_path / "datasets" / script_name
script_dir.mkdir(parents=snake_case_ )
_lowerCAmelCase =script_dir / F'''{script_name}.py'''
with open(snake_case_ , 'w' ) as f:
f.write(snake_case_ )
return str(snake_case_ )
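# A hedged usage sketch: a test can combine the fixtures above and load the
# generated script with datasets.load_dataset (the fixture name below follows
# the parameter names in this file and is partly an assumption):
#
#   def test_dummy_dataset(dataset_loading_script_dir):
#       ds = datasets.load_dataset(dataset_loading_script_dir)
#       assert {"train", "validation"} <= set(ds)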
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =tempfile.mkdtemp()
_lowerCAmelCase =BlipImageProcessor()
_lowerCAmelCase =GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
_lowerCAmelCase =BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
_lowerCAmelCase =InstructBlipProcessor(__A , __A , __A )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self , **__A ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).tokenizer
def UpperCamelCase__ ( self , **__A ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).image_processor
def UpperCamelCase__ ( self , **__A ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__A ).qformer_tokenizer
def UpperCamelCase__ ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase =[Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase =self.get_image_processor(do_normalize=__A , padding_value=1.0 )
_lowerCAmelCase =InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
self.assertIsInstance(processor.qformer_tokenizer , __A )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_qformer_tokenizer()
_lowerCAmelCase =InstructBlipProcessor(
tokenizer=__A , image_processor=__A , qformer_tokenizer=__A )
_lowerCAmelCase =self.prepare_image_inputs()
_lowerCAmelCase =image_processor(__A , return_tensors='np' )
_lowerCAmelCase =processor(images=__A , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_qformer_tokenizer()
_lowerCAmelCase =InstructBlipProcessor(
tokenizer=__A , image_processor=__A , qformer_tokenizer=__A )
_lowerCAmelCase ="lower newer"
_lowerCAmelCase =processor(text=__A )
_lowerCAmelCase =tokenizer(__A , return_token_type_ids=__A )
_lowerCAmelCase =qformer_tokenizer(__A , return_token_type_ids=__A )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_qformer_tokenizer()
_lowerCAmelCase =InstructBlipProcessor(
tokenizer=__A , image_processor=__A , qformer_tokenizer=__A )
_lowerCAmelCase ="lower newer"
_lowerCAmelCase =self.prepare_image_inputs()
_lowerCAmelCase =processor(text=__A , images=__A )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_qformer_tokenizer()
_lowerCAmelCase =InstructBlipProcessor(
tokenizer=__A , image_processor=__A , qformer_tokenizer=__A )
_lowerCAmelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase =processor.batch_decode(__A )
_lowerCAmelCase =tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_qformer_tokenizer()
_lowerCAmelCase =InstructBlipProcessor(
tokenizer=__A , image_processor=__A , qformer_tokenizer=__A )
_lowerCAmelCase ="lower newer"
_lowerCAmelCase =self.prepare_image_inputs()
_lowerCAmelCase =processor(text=__A , images=__A )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 704
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
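# A hedged worked example under the assumed pre-obfuscation name peak; each call
# keeps roughly half of the list, so the search runs in O(log n):
#
#   >>> peak([1, 2, 3, 4, 5, 4, 3])
#   5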
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
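# Note (hedged): _LazyModule defers the torch-dependent imports until an attribute
# is first accessed, so `from transformers.models.wavlm import WavLMConfig` stays
# cheap, while the first access to WavLMModel triggers the real import (or the
# OptionalDependencyNotAvailable fallback declared above).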
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
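    # A hedged sketch of the token-type-id layout produced above: for a single
    # sequence A every position is 0; for a pair (A, B) the second segment is 1:
    #
    #   tokens:   [CLS] A1 ... An [SEP] B1 ... Bm [SEP]
    #   type ids:  0    0  ... 0   0    1  ... 1   1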
| 58
| 0
|
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=100 , __A=13 , __A=30 , __A=2 , __A=3 , __A=True , __A=True , __A=32 , __A=4 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=10 , __A=0.02 , __A=3 , __A=None , __A=[0, 1, 2, 3] , ) -> Optional[int]:
_lowerCAmelCase =parent
_lowerCAmelCase =100
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =scope
_lowerCAmelCase =out_indices
_lowerCAmelCase =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase =(image_size // patch_size) ** 2
_lowerCAmelCase =num_patches + 1
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase__ ( self ) -> Dict:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =BeitModel(config=a_ )
model.to(a_ )
model.eval()
_lowerCAmelCase =model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
_lowerCAmelCase =BeitForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
_lowerCAmelCase =model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =self.type_sequence_label_size
_lowerCAmelCase =BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_lowerCAmelCase =model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase =1
_lowerCAmelCase =BeitForImageClassification(a_ )
model.to(a_ )
model.eval()
_lowerCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase =model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> List[str]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =BeitForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
_lowerCAmelCase =model(a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_lowerCAmelCase =model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __a , __a , unittest.TestCase):
"""simple docstring"""
lowercase : Optional[int] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase : Optional[int] = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase : Optional[int] = False
lowercase : int = False
lowercase : Dict = False
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BeitModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def UpperCamelCase__ ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCamelCase__ ( self ) -> List[str]:
pass
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(a_ )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a_ )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def UpperCamelCase__ ( self ) -> int:
if not self.model_tester.is_training:
return
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
continue
_lowerCAmelCase =model_class(a_ )
model.to(a_ )
model.train()
_lowerCAmelCase =self._prepare_for_class(a_ , a_ , return_labels=a_ )
_lowerCAmelCase =model(**a_ ).loss
loss.backward()
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCAmelCase =False
_lowerCAmelCase =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCAmelCase =model_class(a_ )
model.gradient_checkpointing_enable()
model.to(a_ )
model.train()
_lowerCAmelCase =self._prepare_for_class(a_ , a_ , return_labels=a_ )
_lowerCAmelCase =model(**a_ ).loss
loss.backward()
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =_config_zero_init(a_ )
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(config=a_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCamelCase__ ( self ) -> str:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =BeitModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ) -> Dict:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(a_ )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=a_ , return_tensors='pt' ).pixel_values.to(a_ )
# prepare bool_masked_pos
_lowerCAmelCase =torch.ones((1, 196) , dtype=torch.bool ).to(a_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(pixel_values=a_ , bool_masked_pos=a_ )
_lowerCAmelCase =outputs.logits
# verify the logits
_lowerCAmelCase =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , a_ )
_lowerCAmelCase =torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , a_ , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(a_ )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**a_ )
_lowerCAmelCase =outputs.logits
# verify the logits
_lowerCAmelCase =torch.Size((1, 1000) )
self.assertEqual(logits.shape , a_ )
_lowerCAmelCase =torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3] , a_ , atol=1E-4 ) )
_lowerCAmelCase =281
self.assertEqual(logits.argmax(-1 ).item() , a_ )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
a_ )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**a_ )
_lowerCAmelCase =outputs.logits
# verify the logits
_lowerCAmelCase =torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , a_ )
_lowerCAmelCase =torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
self.assertTrue(torch.allclose(logits[0, :3] , a_ , atol=1E-4 ) )
_lowerCAmelCase =2396
self.assertEqual(logits.argmax(-1 ).item() , a_ )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_lowerCAmelCase =model.to(a_ )
_lowerCAmelCase =BeitImageProcessor(do_resize=a_ , size=640 , do_center_crop=a_ )
_lowerCAmelCase =load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_lowerCAmelCase =Image.open(ds[0]['file'] )
_lowerCAmelCase =image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**a_ )
_lowerCAmelCase =outputs.logits
# verify the logits
_lowerCAmelCase =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , a_ )
_lowerCAmelCase =version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
_lowerCAmelCase =torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=a_ , )
else:
_lowerCAmelCase =torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=a_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_lowerCAmelCase =model.to(a_ )
_lowerCAmelCase =BeitImageProcessor(do_resize=a_ , size=640 , do_center_crop=a_ )
_lowerCAmelCase =load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
_lowerCAmelCase =Image.open(ds[0]['file'] )
_lowerCAmelCase =image_processor(images=a_ , return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**a_ )
_lowerCAmelCase =outputs.logits.detach().cpu()
_lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(500, 300)] )
_lowerCAmelCase =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , a_ )
_lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=a_ )
_lowerCAmelCase =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , a_ )
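def _sketch_bool_masked_pos(image_size=224, patch_size=16):
    # Illustrative sketch (an addition, not part of the original test suite):
    # the (1, 196) boolean mask used in the masked-image-modeling test above
    # has one entry per 16x16 patch of a 224x224 input, since
    # (224 // 16) ** 2 == 196.
    num_patches = (image_size // patch_size) ** 2
    return torch.ones((1, num_patches), dtype=torch.bool)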
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
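# Minimal usage sketch (illustrative; the checkpoint name and the image path
# are assumptions, not part of this module):
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                        return_tensors="pt")
#     # `inputs` carries both `input_ids` and `pixel_values`, matching the
#     # branch above that merges text and image features into one encoding.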
| 58
| 0
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowercase_ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
lowercase : str = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'})
lowercase : Tuple = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
lowercase : Any = field(
default=10_24 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : List[str] = field(
default=__lowercase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
lowercase : List[Any] = field(
default=__lowercase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowercase : Dict = field(
default=__lowercase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase : Dict = field(
default=__lowercase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowercase : Union[str, Any] = field(
default=__lowercase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
lowercase : Union[str, Any] = field(
default=__lowercase , metadata={'help': 'A csv or a json file containing the training data.'})
lowercase : Optional[Any] = field(
default=__lowercase , metadata={'help': 'A csv or a json file containing the validation data.'})
lowercase : Optional[int] = field(default=__lowercase , metadata={'help': 'A csv or a json file containing the test data.'})
def UpperCamelCase__ ( self ) -> Union[str, Any]:
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
_lowerCAmelCase =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowerCAmelCase =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class UpperCamelCase_ :
"""simple docstring"""
lowercase : Optional[int] = field(
default=__lowercase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowercase : Dict = field(
default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowercase : int = field(
default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
lowercase : str = field(
default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowercase : Any = field(
default=__lowercase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowercase : Union[str, Any] = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase : int = field(
default=__lowercase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCAmelCase =training_args.get_process_log_level()
logger.setLevel(__snake_case )
datasets.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_lowerCAmelCase =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowerCAmelCase =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowerCAmelCase ={'train': data_args.train_file, 'validation': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowerCAmelCase =data_args.train_file.split('.' )[-1]
_lowerCAmelCase =data_args.test_file.split('.' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowerCAmelCase =data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(F'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
_lowerCAmelCase =load_dataset('csv' , data_files=__snake_case , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowerCAmelCase =load_dataset('json' , data_files=__snake_case , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowerCAmelCase =raw_datasets['train'].features['label'].names
_lowerCAmelCase =len(__snake_case )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowerCAmelCase =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__snake_case , )
_lowerCAmelCase =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowerCAmelCase ='max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowerCAmelCase =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowerCAmelCase ={'Refused': 0, 'Entailed': 1}
_lowerCAmelCase ={0: 'Refused', 1: 'Entailed'}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_lowerCAmelCase =min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(a__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(a__ ):
_lowerCAmelCase =[_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
_lowerCAmelCase =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowerCAmelCase =examples['statement']
_lowerCAmelCase =list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
_lowerCAmelCase =tokenizer(__snake_case , __snake_case , padding=__snake_case , max_length=__snake_case , truncation=__snake_case )
_lowerCAmelCase =examples['label']
return result
with training_args.main_process_first(desc='dataset map pre-processing' ):
_lowerCAmelCase =raw_datasets.map(
__snake_case , batched=__snake_case , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowerCAmelCase =raw_datasets['train']
if data_args.max_train_samples is not None:
_lowerCAmelCase =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowerCAmelCase =raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowerCAmelCase =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('--do_predict requires a test dataset' )
_lowerCAmelCase =raw_datasets['test']
if data_args.max_predict_samples is not None:
_lowerCAmelCase =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__snake_case ) ) , 3 ):
logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(a__ ):
_lowerCAmelCase =p.predictions[0] if isinstance(p.predictions , __snake_case ) else p.predictions
_lowerCAmelCase =np.argmax(__snake_case , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowerCAmelCase =default_data_collator
elif training_args.fpaa:
_lowerCAmelCase =DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 )
else:
_lowerCAmelCase =None
# Initialize our Trainer
_lowerCAmelCase =Trainer(
model=__snake_case , args=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__snake_case , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
_lowerCAmelCase =None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase =last_checkpoint
_lowerCAmelCase =trainer.train(resume_from_checkpoint=__snake_case )
_lowerCAmelCase =train_result.metrics
_lowerCAmelCase =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_lowerCAmelCase =min(__snake_case , len(__snake_case ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , __snake_case )
trainer.save_metrics('train' , __snake_case )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCAmelCase =trainer.evaluate(eval_dataset=__snake_case )
_lowerCAmelCase =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__snake_case )
_lowerCAmelCase =min(__snake_case , len(__snake_case ) )
trainer.log_metrics('eval' , __snake_case )
trainer.save_metrics('eval' , __snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_lowerCAmelCase =predict_dataset.remove_columns('label' )
_lowerCAmelCase =trainer.predict(__snake_case , metric_key_prefix='predict' ).predictions
_lowerCAmelCase =np.argmax(__snake_case , axis=1 )
_lowerCAmelCase =os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
if trainer.is_world_process_zero():
with open(__snake_case , 'w' ) as writer:
logger.info('***** Predict Results *****' )
writer.write('index\tprediction\n' )
for index, item in enumerate(__snake_case ):
_lowerCAmelCase =label_list[item]
writer.write(F'''{index}\t{item}\n''' )
_lowerCAmelCase ={'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
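# Illustrative sketch (not executed by this script): the TabFact `table_text`
# field encodes a table as '#'-separated cells, one row per line; the inner
# `_convert_table_text_to_pandas` helper above turns it into a DataFrame whose
# first row provides the column names. The sample table is an assumption:
#
#     import pandas as pd
#     table_text = "year#city\n2008#beijing\n2012#london"
#     rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
#     table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
#     # -> 2 rows with columns ["year", "city"]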
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
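        # Gated activation (T5's gated GELU): the original module applies two
        # distinct input projections -- wi_0 through the GELU and wi_1 kept
        # linear -- and multiplies them elementwise before the output
        # projection `wo`.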
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
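# FiLM ("feature-wise linear modulation") as implemented by the layer above:
# the conditioning embedding is projected to one (scale, shift) pair per
# feature and applied as x * (1 + scale) + shift. A sketch with assumed sizes
# (and assuming the bias-free projection of the original T5 FiLM layer, so
# zero conditioning acts as the identity):
#
#     film = SCREAMING_SNAKE_CASE(16, 4)      # the FiLM layer defined last above
#     x = torch.randn(2, 10, 4)
#     cond = torch.zeros(2, 1, 16)
#     assert torch.allclose(film(x, cond), x)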
| 58
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def UpperCamelCase__ ( a__ , a__=False , a__=False , a__=False ):
'''simple docstring'''
_lowerCAmelCase =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_lowerCAmelCase ="""vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase =state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' )
_lowerCAmelCase =state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase =in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase =in_proj_bias[: config.hidden_size]
_lowerCAmelCase =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase =in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase =in_proj_bias[-config.hidden_size :]
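# Shape sketch (hidden_size = 768 is an assumption, matching the base model):
# each timm block stores one fused qkv weight of shape (3 * 768, 768); the
# slices above peel off rows [0:768], [768:1536] and [1536:2304] as the
# separate query/key/value projection weights, and the bias analogously.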
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =dct.pop(UpperCAmelCase__ )
_lowerCAmelCase =val
@torch.no_grad()
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=UpperCAmelCase__ )
_lowerCAmelCase =False
_lowerCAmelCase =False
_lowerCAmelCase =False
_lowerCAmelCase =False
if "vqa" in checkpoint_url:
_lowerCAmelCase =True
_lowerCAmelCase =3_1_2_9
_lowerCAmelCase ="""huggingface/label-files"""
_lowerCAmelCase ="""vqa2-id2label.json"""
_lowerCAmelCase =json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase ={int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
_lowerCAmelCase =idalabel
_lowerCAmelCase ={v: k for k, v in idalabel.items()}
_lowerCAmelCase =ViltForQuestionAnswering(UpperCAmelCase__ )
elif "nlvr" in checkpoint_url:
_lowerCAmelCase =True
_lowerCAmelCase =2
_lowerCAmelCase ={0: """False""", 1: """True"""}
_lowerCAmelCase ={v: k for k, v in config.idalabel.items()}
_lowerCAmelCase =3
_lowerCAmelCase =ViltForImagesAndTextClassification(UpperCAmelCase__ )
elif "irtr" in checkpoint_url:
_lowerCAmelCase =True
_lowerCAmelCase =ViltForImageAndTextRetrieval(UpperCAmelCase__ )
elif "mlm_itm" in checkpoint_url:
_lowerCAmelCase =True
_lowerCAmelCase =ViltForMaskedLM(UpperCAmelCase__ )
else:
raise ValueError('Unknown model type' )
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase =torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location='cpu' )["""state_dict"""]
_lowerCAmelCase =create_rename_keys(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__ , UpperCAmelCase__ )
if mlm_model or irtr_model:
_lowerCAmelCase =["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCAmelCase =model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCAmelCase__ )
# Define processor
_lowerCAmelCase =ViltImageProcessor(size=3_8_4 )
_lowerCAmelCase =BertTokenizer.from_pretrained('bert-base-uncased' )
_lowerCAmelCase =ViltProcessor(UpperCAmelCase__ , UpperCAmelCase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCAmelCase =Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=UpperCAmelCase__ ).raw )
_lowerCAmelCase =Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=UpperCAmelCase__ ).raw )
_lowerCAmelCase =(
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
_lowerCAmelCase =processor(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors='pt' )
_lowerCAmelCase =processor(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors='pt' )
_lowerCAmelCase =model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCAmelCase =Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=UpperCAmelCase__ ).raw )
if mlm_model:
_lowerCAmelCase ="""a bunch of [MASK] laying on a [MASK]."""
else:
_lowerCAmelCase ="""How many cats are there?"""
_lowerCAmelCase =processor(UpperCAmelCase__ , UpperCAmelCase__ , return_tensors='pt' )
_lowerCAmelCase =model(**UpperCAmelCase__ )
# Verify outputs
if mlm_model:
_lowerCAmelCase =torch.Size([1, 1_1, 3_0_5_2_2] )
_lowerCAmelCase =torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCAmelCase__ , atol=1E-4 )
# verify masked token prediction equals "cats"
_lowerCAmelCase =outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCAmelCase =torch.Size([1, 3_1_2_9] )
_lowerCAmelCase =torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 )
# verify vqa prediction equals "2"
_lowerCAmelCase =outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCAmelCase =torch.Size([1, 2] )
_lowerCAmelCase =torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowercase_ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ , a__ = 0 , a__ = 0 ):
'''simple docstring'''
_lowerCAmelCase =right or len(_lowercase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(_lowercase , _lowercase , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
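# Example (the list and key are assumptions): the routine compares both ends
# of the window and narrows it by one step per recursive call, so it finds a
# key in unsorted data in O(n) comparisons:
#
#     search([5, 1, 9, 3], 9)  # -> 2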
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(UpperCAmelCase__ ) )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
if index == len(UpperCAmelCase__ ):
return True
# Recursive Step
for i in range(UpperCAmelCase__ ):
if valid_coloring(graph[index] , UpperCAmelCase__ , UpperCAmelCase__ ):
# Color current vertex
_lowerCAmelCase =i
# Validate coloring
if util_color(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , index + 1 ):
return True
# Backtrack
_lowerCAmelCase =-1
return False
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =[-1] * len(UpperCAmelCase__ )
if util_color(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , 0 ):
return colored_vertices
return []
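# Usage sketch (the 4-cycle adjacency matrix is an assumption): attempt to
# 2-color the graph with the backtracking above; graph[i][j] == 1 means that
# vertices i and j are adjacent.
#
#     graph = [
#         [0, 1, 0, 1],
#         [1, 0, 1, 0],
#         [0, 1, 0, 1],
#         [1, 0, 1, 0],
#     ]
#     colored_vertices = [-1] * 4
#     util_color(graph, 2, colored_vertices, 0)  # -> True; colors [0, 1, 0, 1]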
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCAmelCase , _lowerCAmelCase =array[indexa], array[indexa]
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
if length > 1:
_lowerCAmelCase =int(length / 2 )
for i in range(_snake_case , low + middle ):
comp_and_swap(_snake_case , _snake_case , i + middle , _snake_case )
bitonic_merge(_snake_case , _snake_case , _snake_case , _snake_case )
bitonic_merge(_snake_case , low + middle , _snake_case , _snake_case )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
if length > 1:
_lowerCAmelCase =int(length / 2 )
bitonic_sort(_snake_case , _snake_case , _snake_case , 1 )
bitonic_sort(_snake_case , low + middle , _snake_case , 0 )
bitonic_merge(_snake_case , _snake_case , _snake_case , _snake_case )
if __name__ == "__main__":
lowercase_ = input('''Enter numbers separated by a comma:\n''').strip()
lowercase_ = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
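# Note: this bitonic network is only guaranteed to sort inputs whose length is
# a power of two (each stage halves the length exactly). For example, the 8
# values "3, 7, 4, 8, 6, 2, 1, 5" come out as 1..8 ascending and then, after
# the final merge with direction 0, as 8..1 descending.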
| 711
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
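# Worked example (Project Euler 16 sanity check): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26, so solution(15) returns 26.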
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 712
|
'''simple docstring'''
def check_cycle(graph):
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph )
def depth_first_search(graph, vertex, visited, rec_stk):
    '''simple docstring'''
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
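# Added examples (not in the original snippet): a 0 -> 1 -> 2 -> 0 back edge
# forms a cycle; dropping the closing edge removes it.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1], 1: [2], 2: []}) is False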
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    '''simple docstring'''
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']
    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused QKV weight in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
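def _split_qkv_demo():
    # Added for illustration (not part of the original converter): torch.split
    # cuts a fused (3 * hidden, hidden) projection into equal chunks along
    # dim 0, mirroring the loop above.
    fused = torch.randn(3 * 4, 4)
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert q.shape == k.shape == v.shape == (4, 4)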
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    '''simple docstring'''
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
lowercase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
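# Illustrative usage (added; assumes the upstream transformers names
# Blip2Config / Blip2VisionConfig / Blip2QFormerConfig / OPTConfig that the
# mangled class names above stand in for):
#     from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#     config = Blip2Config.from_vision_qformer_text_configs(
#         Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#     )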
| 58
| 0
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowercase_ = ''' \"\"\"
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) )
_lowerCAmelCase =self.diffusers_dir
shutil.copy(
os.path.join(__A , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase ="""src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase__ ( self , __A , __A , __A , __A=None ) -> Optional[Any]:
_lowerCAmelCase =comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
_lowerCAmelCase =comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
_lowerCAmelCase =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_lowerCAmelCase =black.format_str(__A , mode=__A )
_lowerCAmelCase =os.path.join(self.diffusers_dir , 'new_code.py' )
with open(__A , 'w' , newline='\n' ) as f:
f.write(__A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__A )
with open(__A , 'r' ) as f:
self.assertTrue(f.read() , __A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(__A , __A )
def UpperCamelCase__ ( self ) -> Tuple:
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , __A , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , __A ) , )
# Copy consistency with a really long name
_lowerCAmelCase ="""TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub('Bert' , __A , __A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , __A , overwrite_result=re.sub('DDPM' , 'Test' , __A ) , )
| 714
|
'''simple docstring'''
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message):
    '''simple docstring'''
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message):
    '''simple docstring'''
    return "".join(REVERSE_DICT[char] for char in message.split())
def main():
    '''simple docstring'''
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
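# Added round-trip example (not in the original snippet):
assert encrypt('SOS') == '... --- ...'
assert decrypt('... --- ...') == 'SOS'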
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase):
"""simple docstring"""
lowercase : Dict = XGLMTokenizer
lowercase : Dict = XGLMTokenizerFast
lowercase : Tuple = True
lowercase : List[str] = True
def UpperCamelCase__ ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase =XGLMTokenizer(__lowercase , keep_accents=__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase ='<pad>'
_lowerCAmelCase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(__lowercase ) , 1008 )
def UpperCamelCase__ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =XGLMTokenizer(__lowercase , keep_accents=__lowercase )
_lowerCAmelCase =tokenizer.tokenize('This is a test' )
self.assertListEqual(__lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(
__lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__lowercase , f.name )
_lowerCAmelCase =XGLMTokenizer(f.name , keep_accents=__lowercase )
_lowerCAmelCase =pickle.dumps(__lowercase )
pickle.loads(__lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase ='I was born in 92000, and this is falsé.'
_lowerCAmelCase =tokenizer.tokenize(__lowercase )
_lowerCAmelCase =rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
_lowerCAmelCase =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
_lowerCAmelCase =rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase =tokenizer.encode(__lowercase )
_lowerCAmelCase =rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
@slow
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase ='Hello World!'
_lowerCAmelCase =[2, 3_1227, 4447, 35]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@slow
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
_lowerCAmelCase =[2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
# fmt: off
_lowerCAmelCase ={
'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name='facebook/xglm-564M' , padding=__lowercase , )
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 0
|
'''simple docstring'''
def count_set_bits(number):
    '''simple docstring'''
    if not isinstance(number, int) or number < 0:
        raise ValueError('Input must be a non-negative integer')
    count = 0
    while number:
        # This clears the lowest set bit on each pass, so the loop runs once
        # per `1` bit instead of once per bit position.
        number &= number - 1
        count += 1
    return count
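# Added example (not in the original snippet): 0b1011 has three set bits.
assert count_set_bits(0b1011) == 3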
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def _start_torch_memory_measurement():
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
lowercase : Dict = KandinskyVaaPriorPipeline
lowercase : Dict = ["""prompt"""]
lowercase : Dict = ["""prompt""", """negative_prompt"""]
lowercase : Dict = [
"""num_images_per_prompt""",
"""generator""",
"""num_inference_steps""",
"""latents""",
"""negative_prompt""",
"""guidance_scale""",
"""output_type""",
"""return_dict""",
]
lowercase : List[Any] = False
@property
def UpperCamelCase__ ( self ) -> Dict:
return 32
@property
def UpperCamelCase__ ( self ) -> Tuple:
return 32
@property
def UpperCamelCase__ ( self ) -> List[Any]:
return self.time_input_dim
@property
def UpperCamelCase__ ( self ) -> str:
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ) -> str:
return 100
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCamelCase__ ( self ) -> List[str]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__A )
@property
def UpperCamelCase__ ( self ) -> Dict:
torch.manual_seed(0 )
_lowerCAmelCase ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
_lowerCAmelCase =PriorTransformer(**__A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_lowerCAmelCase =nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase__ ( self ) -> List[str]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_lowerCAmelCase =CLIPVisionModelWithProjection(__A )
return model
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =CLIPImageProcessor(
crop_size=224 , do_center_crop=__A , do_normalize=__A , do_resize=__A , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.dummy_prior
_lowerCAmelCase =self.dummy_image_encoder
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =self.dummy_tokenizer
_lowerCAmelCase =self.dummy_image_processor
_lowerCAmelCase =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__A , clip_sample_range=10.0 , )
_lowerCAmelCase ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def UpperCamelCase__ ( self , __A , __A=0 ) -> Dict:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase ='cpu'
_lowerCAmelCase =self.get_dummy_components()
_lowerCAmelCase =self.pipeline_class(**__A )
_lowerCAmelCase =pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCAmelCase =pipe(**self.get_dummy_inputs(__A ) )
_lowerCAmelCase =output.image_embeds
_lowerCAmelCase =pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
_lowerCAmelCase =image[0, -10:]
_lowerCAmelCase =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_lowerCAmelCase =np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =torch_device == 'cpu'
_lowerCAmelCase =True
_lowerCAmelCase =False
self._test_inference_batch_single_identical(
test_max_difference=__A , relax_max_difference=__A , test_mean_pixel_difference=__A , )
@skip_mps
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =torch_device == 'cpu'
_lowerCAmelCase =False
self._test_attention_slicing_forward_pass(
test_max_difference=__A , test_mean_pixel_difference=__A , )
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
| 58
| 0
|
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def UpperCamelCase__ ( a__ , a__ , a__ , a__ , a__=True ):
'''simple docstring'''
model.train()
_lowerCAmelCase =model(__lowerCAmelCase )
_lowerCAmelCase =F.mse_loss(__lowerCAmelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCAmelCase )
def UpperCamelCase__ ( a__ , a__=False ):
'''simple docstring'''
set_seed(4_2 )
_lowerCAmelCase =RegressionModel()
_lowerCAmelCase =deepcopy(__lowerCAmelCase )
_lowerCAmelCase =RegressionDataset(length=8_0 )
_lowerCAmelCase =DataLoader(__lowerCAmelCase , batch_size=1_6 )
model.to(accelerator.device )
if sched:
_lowerCAmelCase =AdamW(params=model.parameters() , lr=1E-3 )
_lowerCAmelCase =AdamW(params=ddp_model.parameters() , lr=1E-3 )
_lowerCAmelCase =LambdaLR(__lowerCAmelCase , lr_lambda=lambda a__ : epoch**0.65 )
_lowerCAmelCase =LambdaLR(__lowerCAmelCase , lr_lambda=lambda a__ : epoch**0.65 )
# Make a copy of `model`
if sched:
_lowerCAmelCase =accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
_lowerCAmelCase =accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
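# Sketch of the pattern the tests below exercise (added; illustrative only):
#     with accelerator.no_sync(ddp_model):  # gradients accumulate locally
#         accelerator.backward(loss)        # no cross-process grad sync here
#     accelerator.backward(loss)            # next backward outside re-syncs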
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =get_training_setup(__lowerCAmelCase )
# Use a single batch
_lowerCAmelCase =next(iter(__lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowerCAmelCase =accelerator.gather((ddp_input, ddp_target) )
_lowerCAmelCase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
_lowerCAmelCase =ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =get_training_setup(__lowerCAmelCase )
# Use a single batch
_lowerCAmelCase =next(iter(__lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_lowerCAmelCase =accelerator.gather((ddp_input, ddp_target) )
_lowerCAmelCase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
_lowerCAmelCase =ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def UpperCamelCase__ ( a__=False , a__=False ):
'''simple docstring'''
_lowerCAmelCase =Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowerCAmelCase =get_training_setup(__lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
_lowerCAmelCase =batch.values()
# Gather the distributed inputs and targs for the base model
_lowerCAmelCase =accelerator.gather((ddp_input, ddp_target) )
_lowerCAmelCase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
_lowerCAmelCase =ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
GradientState._reset_state()
def UpperCamelCase__ ( a__=False , a__=False ):
'''simple docstring'''
_lowerCAmelCase =Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_lowerCAmelCase =get_training_setup(__lowerCAmelCase , __lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
_lowerCAmelCase =batch.values()
# Gather the distributed inputs and targs for the base model
_lowerCAmelCase =accelerator.gather((ddp_input, ddp_target) )
_lowerCAmelCase =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
_lowerCAmelCase =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Accelerator()
_lowerCAmelCase =RegressionDataset(length=8_0 )
_lowerCAmelCase =DataLoader(__lowerCAmelCase , batch_size=1_6 )
_lowerCAmelCase =RegressionDataset(length=9_6 )
_lowerCAmelCase =DataLoader(__lowerCAmelCase , batch_size=1_6 )
_lowerCAmelCase =accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
if iteration < len(__lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
if batch_num < len(__lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Accelerator()
_lowerCAmelCase =accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(__lowerCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(__lowerCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase )
def _mp_fn(index):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 718
|
'''simple docstring'''
lowercase_ = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a, b):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1, r1, n2, r2):
    '''simple docstring'''
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a, n):
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1, r1, n2, r2):
    '''simple docstring'''
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
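# Added worked example (not in the original snippet): the unique x mod 15 with
# x % 3 == 1 and x % 5 == 4 is 4.
assert chinese_remainder_theorem(3, 1, 5, 4) == 4
assert chinese_remainder_theorem2(3, 1, 5, 4) == 4
assert invert_modulo(3, 5) == 2  # 3 * 2 == 6 == 1 (mod 5)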
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
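# A standalone sketch (not part of the test file above; names are illustrative)
# of the argv-patching pattern both tests rely on: an argparse-based `main`
# can be driven in-process by temporarily swapping sys.argv.
import sys
from unittest.mock import patch

def _run_with_argv(main_fn, argv):
    # argv[0] plays the role of the script name, the rest are CLI flags.
    with patch.object(sys, "argv", argv):
        return main_fn()

# e.g. _run_with_argv(main, ["finetune.py", "--gpus=1", "--do_predict"])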
| 58
| 0
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
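# Hedged usage sketch (not from the original file; the task name, paths and
# flags are illustrative — the accepted flags come from add_generic_args and
# the add_model_specific_args hooks above):
#
#   python run_pl_glue.py \
#       --model_name_or_path bert-base-cased \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --output_dir ./glue-out \
#       --max_seq_length 128 \
#       --gpus 1 \
#       --do_train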
| 58
| 0
|
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset , args: Dict ):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )

    # load metric
    wer = load_metric('wer' )
    cer = load_metric('cer' )

    # compute metrics
    wer_result = wer.compute(references=result['target'] , predictions=result['prediction'] )
    cer_result = cer.compute(references=result['target'] , predictions=result['prediction'] )

    # print & log results
    result_str = F'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str )

    with open(F'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
        f.write(result_str )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F'''log_{dataset_id}_predictions.txt'''
        target_file = F'''log_{dataset_id}_targets.txt'''

        with open(pred_file , 'w' ) as p, open(target_file , 'w' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F'''{i}''' + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(F'''{i}''' + '\n' )
                t.write(batch['target'] + '\n' )

            result.map(write_to_file , with_indices=True )


def normalize_text(text: str ) -> str:
    '''simple docstring'''
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '' , text.lower() )

    # In addition, we can normalize the target text, e.g. removing newline characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t ) )

    return text


def main(args ):
    '''simple docstring'''
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate ) )

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'] )
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
lowercase_ = parser.parse_args()
main(args)
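# Hedged usage sketch (not from the original file; the script name, model id
# and dataset id are illustrative placeholders — the flags are the ones
# registered by the parser above):
#
#   python eval.py \
#       --model_id some-org/wav2vec2-finetuned \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en \
#       --split test \
#       --log_outputs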
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
    """simple docstring"""
    def __init__( self , num_of_nodes ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}

    def add_edge( self , u_node , v_node , weight ) -> None:
        self.m_edges.append([u_node, v_node, weight] )

    def find_component( self , u_node ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )

    def set_component( self , u_node ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )

    def union( self , component_size , u_node , v_node ) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )

    def boruvka( self ) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
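# Hedged usage sketch (not part of the original snippet; the graph is
# illustrative): exercising the reconstructed Boruvka implementation above.
def _demo_boruvka() -> None:
    graph = SCREAMING_SNAKE_CASE(4)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3), (2, 3, 4)]:
        graph.add_edge(u, v, w)
    # prints each chosen edge; the total MST weight is 1 + 2 + 4 = 7
    graph.boruvka()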
| 58
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowercase_ = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
lowercase_ = {
'''RUCAIBox/mvp''': 1024,
}
class SCREAMING_SNAKE_CASE ( __a):
"""simple docstring"""
lowercase : List[str] = VOCAB_FILES_NAMES
lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ['input_ids', 'attention_mask']
lowercase : Union[str, Any] = MvpTokenizer
def __init__( self , __A=None , __A=None , __A=None , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , __A=True , **__A , ) -> Optional[int]:
super().__init__(
A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , A__ ) != add_prefix_space:
_lowerCAmelCase =getattr(A__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase =add_prefix_space
_lowerCAmelCase =pre_tok_class(**A__ )
_lowerCAmelCase =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowerCAmelCase ='post_processor'
_lowerCAmelCase =getattr(self.backend_tokenizer , A__ , A__ )
if tokenizer_component_instance:
_lowerCAmelCase =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase =tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase =tuple(state['cls'] )
_lowerCAmelCase =False
if state.get('add_prefix_space' , A__ ) != add_prefix_space:
_lowerCAmelCase =add_prefix_space
_lowerCAmelCase =True
if state.get('trim_offsets' , A__ ) != trim_offsets:
_lowerCAmelCase =trim_offsets
_lowerCAmelCase =True
if changes_to_apply:
_lowerCAmelCase =getattr(A__ , state.pop('type' ) )
_lowerCAmelCase =component_class(**A__ )
setattr(self.backend_tokenizer , A__ , A__ )
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ) -> None:
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def UpperCamelCase__ ( self , *__A , **__A ) -> BatchEncoding:
_lowerCAmelCase =kwargs.get('is_split_into_words' , A__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*A__ , **A__ )
def UpperCamelCase__ ( self , *__A , **__A ) -> BatchEncoding:
_lowerCAmelCase =kwargs.get('is_split_into_words' , A__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*A__ , **A__ )
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[str]:
_lowerCAmelCase =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
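# Hedged usage sketch (commented out because it needs network access to the
# RUCAIBox/mvp checkpoint; not part of the original file):
#
#   from transformers import MvpTokenizerFast
#   tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#   enc = tok("Summarize: the weather is nice today", return_tensors="pt")
#   print(enc["input_ids"].shape)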
| 700
|
'''simple docstring'''
from PIL import Image
def change_brightness( img , level ):
    '''simple docstring'''

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
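# A minimal sketch (not from the original file) of what Image.point does with
# the callable above: every channel value c in 0..255 is mapped through
# brightness(c), so level=100 shifts each value up by 100; PIL clamps the
# stored result back into 0..255.
def _demo_brightness_mapping() -> None:
    level = 100
    mapped = [128 + level + (c - 128) for c in (0, 50, 200, 255)]
    assert mapped == [100, 150, 300, 355]  # 300 and 355 are clamped to 255 by PIL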
| 58
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowercase : Optional[str] = field(
default=__lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowercase : Optional[str] = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
lowercase : Optional[str] = field(
default=__lowercase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
lowercase : bool = field(default=__lowercase , metadata={'help': 'Set this flag to use fast tokenization.'})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase : Optional[str] = field(
default=__lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : str = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
lowercase : Optional[str] = field(
default=__lowercase , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
lowercase : int = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : bool = field(
default=__lowercase , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
_lowerCAmelCase =import_module('tasks' )
try:
_lowerCAmelCase =getattr(_SCREAMING_SNAKE_CASE , model_args.task_type )
_lowerCAmelCase =token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
_lowerCAmelCase =token_classification_task.get_labels(data_args.labels )
_lowerCAmelCase =dict(enumerate(_SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase =len(_SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , id2label=_SCREAMING_SNAKE_CASE , label2id={label: i for i, label in enumerate(_SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , )
_lowerCAmelCase =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
_lowerCAmelCase =AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
_lowerCAmelCase =(
TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_lowerCAmelCase =(
TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(a__ , a__ ) -> Tuple[List[int], List[int]]:
_lowerCAmelCase =np.argmax(_SCREAMING_SNAKE_CASE , axis=2 )
_lowerCAmelCase , _lowerCAmelCase =preds.shape
_lowerCAmelCase =[[] for _ in range(_SCREAMING_SNAKE_CASE )]
_lowerCAmelCase =[[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(a__ ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase =align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"precision": precision_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"recall": recall_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"f1": fa_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
}
# Data collator
    _lowerCAmelCase =DataCollatorWithPadding(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
_lowerCAmelCase =Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCAmelCase ={}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCAmelCase =trainer.evaluate()
_lowerCAmelCase =os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
results.update(_SCREAMING_SNAKE_CASE )
# Predict
if training_args.do_predict:
_lowerCAmelCase =TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =trainer.predict(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase , _lowerCAmelCase =align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase =os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
_lowerCAmelCase =os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return results
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
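# Hedged usage sketch (not from the original file; paths and hyper-parameters
# are illustrative — HfArgumentParser derives the flags from the dataclasses
# above and from TrainingArguments):
#
#   python run_ner.py \
#       --model_name_or_path bert-base-cased \
#       --task_type NER \
#       --data_dir ./conll2003 \
#       --output_dir ./ner-out \
#       --max_seq_length 128 \
#       --do_train --do_eval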
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ):
    '''simple docstring'''
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
lowercase_ = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
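# Hedged usage sketch (not from the original file; the script name and
# directory paths are illustrative placeholders):
#
#   python convert_openai_gpt_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt-tf \
#       --pytorch_dump_folder_path ./openai-gpt-pt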
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def radix_sort( list_of_ints: list[int] ) -> list[int]:
    '''simple docstring'''
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
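# Worked example (a sketch, not from the original file): radix_sort makes one
# pass per decimal digit of the maximum element 802, reading the buckets back
# in order after each pass.
#   radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   after the 1s pass:   [170, 90, 802, 2, 24, 45, 75, 66]
#   after the 10s pass:  [802, 2, 24, 45, 66, 170, 75, 90]
#   after the 100s pass: [2, 24, 45, 66, 75, 90, 170, 802]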
| 58
| 0
|
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """simple docstring"""
    def __init__( self , start , end , val , left=None , right=None ) -> None:
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__( self ) -> str:
        return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''


class SegmentTree:
    """simple docstring"""
    def __init__( self , collection: Sequence , function ) -> None:
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )

    def update( self , i , val ) -> None:
        self._update_tree(self.root , i , val )

    def query_range( self , i , j ):
        return self._query_range(self.root , i , j )

    def _build_tree( self , start , end ):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )

    def _update_tree( self , node , i , val ) -> None:
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )

    def _query_range( self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )

    def traverse( self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
lowercase_ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
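# Hedged note (not from the original file): _build_tree splits the range in
# half at every level, so construction creates O(n) nodes while update and
# query_range each visit O(log n) nodes. For example:
#   tree = SegmentTree([2, 1, 5, 3, 4], min)
#   assert tree.query_range(0, 4) == 1
#   tree.update(1, 7)   # the stored values become [2, 7, 5, 3, 4]
#   assert tree.query_range(0, 4) == 2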
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    """simple docstring"""
    task: str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'transcription': Value('string')})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features( self , features ) -> "AutomaticSpeechRecognition":
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema['audio'] = features[self.audio_column]
        task_template.__dict__['input_schema'] = input_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
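# Hedged usage sketch (not from the original file; the features are
# illustrative): aligning the template swaps in the dataset's concrete Audio
# feature, e.g. one carrying a specific sampling rate.
def _demo_align() -> None:
    feats = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
    template = AutomaticSpeechRecognition().align_with_features(feats)
    # {'audio': 'audio', 'transcription': 'transcription'}
    print(template.column_mapping)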
| 704
|
'''simple docstring'''
from __future__ import annotations
def peak( lst: list[int] ) -> int:
    '''simple docstring'''
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
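# Hedged usage sketch (inputs are illustrative, not from the original file):
# on a unimodal list the halving recursion reaches the peak in O(log n) calls.
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1])  # -> 5
#   peak([1, 10, 9, 8, 7, 6, 5, 4])    # -> 10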
| 58
| 0
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =tempfile.mkdtemp()
_lowerCAmelCase =8
# DPR tok
_lowerCAmelCase =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowerCAmelCase =os.path.join(self.tmpdirname , 'dpr_tokenizer' )
os.makedirs(__snake_case , exist_ok=__snake_case )
_lowerCAmelCase =os.path.join(__snake_case , DPR_VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
# BART tok
_lowerCAmelCase =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_lowerCAmelCase =dict(zip(__snake_case , range(len(__snake_case ) ) ) )
_lowerCAmelCase =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_lowerCAmelCase ={'''unk_token''': '''<unk>'''}
_lowerCAmelCase =os.path.join(self.tmpdirname , 'bart_tokenizer' )
os.makedirs(__snake_case , exist_ok=__snake_case )
_lowerCAmelCase =os.path.join(__snake_case , BART_VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase =os.path.join(__snake_case , BART_VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def UpperCamelCase__ ( self ) -> str:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) )
def UpperCamelCase__ ( self ) -> Dict:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) )
def UpperCamelCase__ ( self ) -> str:
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =os.path.join(self.tmpdirname , 'rag_tokenizer' )
_lowerCAmelCase =RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
_lowerCAmelCase =RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__snake_case )
rag_tokenizer.save_pretrained(__snake_case )
_lowerCAmelCase =RagTokenizer.from_pretrained(__snake_case , config=__snake_case )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __snake_case )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __snake_case )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =RagTokenizer.from_pretrained('facebook/rag-token-nq' )
_lowerCAmelCase =[
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
_lowerCAmelCase =tokenizer(__snake_case )
self.assertIsNotNone(__snake_case )
@slow
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =RagTokenizer.from_pretrained('facebook/rag-sequence-nq' )
_lowerCAmelCase =[
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
_lowerCAmelCase =tokenizer(__snake_case )
self.assertIsNotNone(__snake_case )
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
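        # For a pair (A, B) this yields [CLS] A [SEP] -> all 0s followed by
        # B [SEP] -> all 1s, i.e. the standard BERT segment-id scheme.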
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
| 58
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> Optional[int]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_lowerCAmelCase =model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase ='sgugger/tiny-distilbert-classification'
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , only_pretrain_model=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , torchscript=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , fpaa=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
# set architectures equal to `None`
_lowerCAmelCase =None
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__A , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ='sshleifer/tinier_bart'
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase ='sshleifer/tinier_bart'
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , save_to_csv=__A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__A , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(__A , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(__A , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(__A , 'train_time.csv' ) , env_info_csv_file=os.path.join(__A , 'env.csv' ) , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A )
benchmark.run()
self.assertTrue(Path(os.path.join(__A , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__A , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__A , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__A , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__A , 'env.csv' ) ).exists() )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase ='sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__A ):
self.assertTrue(hasattr(__A , 'sequential' ) )
self.assertTrue(hasattr(__A , 'cumulative' ) )
self.assertTrue(hasattr(__A , 'current' ) )
self.assertTrue(hasattr(__A , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__A , inference=__A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__A , 'log.txt' ) , log_print=__A , trace_memory_line_by_line=__A , multi_process=__A , )
_lowerCAmelCase =PyTorchBenchmark(__A )
_lowerCAmelCase =benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__A , 'log.txt' ) ).exists() )
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 58
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCamelCase_ ( _UpperCamelCase):
"""simple docstring"""
lowercase : Union[str, Any] = "speech_to_text"
lowercase : Any = ["past_key_values"]
lowercase : int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __A=1_0000 , __A=12 , __A=2048 , __A=4 , __A=6 , __A=2048 , __A=4 , __A=0.0 , __A=0.0 , __A=True , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.02 , __A=2 , __A=True , __A=1 , __A=0 , __A=2 , __A=6000 , __A=1024 , __A=2 , __A=(5, 5) , __A=1024 , __A=80 , __A=1 , **__A , ) -> Optional[int]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =encoder_ffn_dim
_lowerCAmelCase =encoder_layers
_lowerCAmelCase =encoder_attention_heads
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =activation_function
_lowerCAmelCase =init_std
_lowerCAmelCase =encoder_layerdrop
_lowerCAmelCase =decoder_layerdrop
_lowerCAmelCase =use_cache
_lowerCAmelCase =encoder_layers
_lowerCAmelCase =scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase =max_source_positions
_lowerCAmelCase =max_target_positions
_lowerCAmelCase =num_conv_layers
        _lowerCAmelCase =list(__A )
_lowerCAmelCase =conv_channels
_lowerCAmelCase =input_feat_per_channel
_lowerCAmelCase =input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
        super().__init__(
            pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , **__A , )
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
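    # Shape note: query_input (batch, query_len) and key_input (batch, key_len)
    # broadcast to (batch, query_len, key_len); unsqueeze(-3) then yields the
    # (batch, 1, query_len, key_len) bias expected by attention, where entry
    # (i, j) is nonzero only if both query i and key j are unmasked.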
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
        _lowerCAmelCase =self.act(self.wi_0(__A ) )  # gated GELU branch
        _lowerCAmelCase =self.wi_1(__A )  # linear branch
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
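# Worked RMSNorm example: for hidden_states = [3.0, 4.0] the mean of squares is
# (9 + 16) / 2 = 12.5, so the normalized output (before the learned weight) is
# [3.0, 4.0] / sqrt(12.5 + eps).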
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
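# FiLM (feature-wise linear modulation) sketch: scale_bias projects the
# conditioning embedding from in_features to 2 * out_features, which is chunked
# into a per-channel scale and shift and applied as x * (1 + scale) + shift.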
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table):
    '''simple docstring'''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
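# Usage sketch (hypothetical matrix; requires a square, LU-decomposable input):
#   lower, upper = lower_upper_decomposition(np.array([[4.0, 3.0], [6.0, 3.0]]))
#   # lower = [[1.0, 0.0], [1.5, 1.0]], upper = [[4.0, 3.0], [0.0, -1.5]],
#   # and lower @ upper reproduces the input.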
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 58
| 0
|
'''simple docstring'''
def remove_duplicates(key):
    '''simple docstring'''
    key_no_dups = ''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key):
    '''simple docstring'''
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message, cipher_map):
    '''simple docstring'''
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message, cipher_map):
    '''simple docstring'''
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main():
    '''simple docstring'''
    message = input('Enter message to encode or decode: ').strip()
    key = input('Enter keyword: ').strip()
    option = input('Encipher or decipher? E/D:').strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option')
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
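# Usage sketch (hypothetical keyword, showing the intended round trip):
#   cipher_map = create_cipher_map('Goodbye!!')
#   encipher('Hello World!!', cipher_map)   ->  'CYJJM VMQJB!!'
#   decipher('CYJJM VMQJB!!', cipher_map)   ->  'HELLO WORLD!!'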
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
lowercase : List[str] = KandinskyVaaImgaImgPipeline
lowercase : Tuple = ['image_embeds', 'negative_image_embeds', 'image']
lowercase : Union[str, Any] = [
'image_embeds',
'negative_image_embeds',
'image',
]
lowercase : Optional[int] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase : Optional[Any] = False
@property
def UpperCamelCase__ ( self ) -> int:
return 32
@property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
return 32
@property
def UpperCamelCase__ ( self ) -> List[str]:
return self.time_input_dim
@property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ) -> str:
return 100
@property
def UpperCamelCase__ ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase ={
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase =UNetaDConditionModel(**_a )
return model
@property
def UpperCamelCase__ ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__ ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.dummy_unet
_lowerCAmelCase =self.dummy_movq
_lowerCAmelCase ={
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00_085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase =DDIMScheduler(**_a )
_lowerCAmelCase ={
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase__ ( self , __A , __A=0 ) -> str:
_lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((256, 256) )
if str(_a ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(_a )
else:
_lowerCAmelCase =torch.Generator(device=_a ).manual_seed(_a )
_lowerCAmelCase ={
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ="""cpu"""
_lowerCAmelCase =self.get_dummy_components()
_lowerCAmelCase =self.pipeline_class(**_a )
_lowerCAmelCase =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_lowerCAmelCase =pipe(**self.get_dummy_inputs(_a ) )
_lowerCAmelCase =output.images
_lowerCAmelCase =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase =np.array(
[0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
_lowerCAmelCase =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase ="""A red cartoon frog, 4k"""
_lowerCAmelCase =KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_lowerCAmelCase =KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
_lowerCAmelCase =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase =pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
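        # Each TF checkpoint variable below is renamed to its PyTorch counterpart;
        # optimizer slots (adam_m / adam_v) are skipped, and 2-D kernels are
        # transposed to match torch.nn.Linear's (out_features, in_features) layout.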
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58
| 0
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_)
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_):
"""simple docstring"""
lowercase : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True})
lowercase : Union[str, Any] = Features({'image': Image()})
lowercase : List[Any] = Features({'labels': ClassLabel})
lowercase : Union[str, Any] = 'image'
lowercase : List[Any] = 'labels'
def UpperCamelCase__ ( self , __A ) -> Optional[Any]:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , UpperCamelCase__ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
_lowerCAmelCase =copy.deepcopy(self )
_lowerCAmelCase =self.label_schema.copy()
_lowerCAmelCase =features[self.label_column]
_lowerCAmelCase =label_schema
return task_template
@property
def UpperCamelCase__ ( self ) -> List[str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
| 711
|
'''simple docstring'''
def solution(power = 1000):
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
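# Example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) returns 26.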
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a, b):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1, r1, n2, r2):
    '''simple docstring'''
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a, n):
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1, r1, n2, r2):
    '''simple docstring'''
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
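# Worked example: x % 5 == 1 and x % 7 == 3 has the unique solution x = 31
# modulo 35:
#   chinese_remainder_theorem(5, 1, 7, 3)   ->  31
#   chinese_remainder_theorem2(5, 1, 7, 3)  ->  31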
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 712
|
'''simple docstring'''
def check_cycle(graph):
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph )
def depth_first_search(graph, vertex, visited, rec_stk):
    '''simple docstring'''
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
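# Usage sketch on small directed graphs (hypothetical adjacency lists):
#   check_cycle({0: [1], 1: [2], 2: [0]})  ->  True   # back edge 2 -> 0
#   check_cycle({0: [1], 1: [2], 2: []})   ->  False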
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , __A , __A = True , __A = None , __A = 32 , __A = True , __A = 1 / 255 , __A = True , __A = True , __A = [0.48_145_466, 0.4_578_275, 0.40_821_073] , __A = [0.26_862_954, 0.26_130_258, 0.27_577_711] , __A = True , __A=7 , __A=30 , __A=400 , __A=3 , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =do_resize
_lowerCAmelCase =size if size is not None else {"shortest_edge": 288}
_lowerCAmelCase =size_divisor
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =image_mean
_lowerCAmelCase =image_std
_lowerCAmelCase =do_pad
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =min_resolution
_lowerCAmelCase =max_resolution
def UpperCamelCase__ ( self ) -> List[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCamelCase__ ( self , __A , __A=False ) -> Optional[int]:
if not batched:
_lowerCAmelCase =self.size["shortest_edge"]
_lowerCAmelCase =image_inputs[0]
if isinstance(__A , Image.Image ):
_lowerCAmelCase =image.size
else:
_lowerCAmelCase =image.shape[1], image.shape[2]
_lowerCAmelCase =size / min(__A , __A )
if h < w:
_lowerCAmelCase =size, scale * w
else:
_lowerCAmelCase =scale * h, size
_lowerCAmelCase =int((1333 / 800) * size )
if max(__A , __A ) > max_size:
_lowerCAmelCase =max_size / max(__A , __A )
_lowerCAmelCase =newh * scale
_lowerCAmelCase =neww * scale
_lowerCAmelCase =int(newh + 0.5 ), int(neww + 0.5 )
_lowerCAmelCase =(
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_lowerCAmelCase =[]
for image in image_inputs:
_lowerCAmelCase =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCAmelCase =max(__A , key=lambda __A : item[0] )[0]
_lowerCAmelCase =max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Runs BridgeTowerImageProcessor over PIL, NumPy and PyTorch inputs."""
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    """Configuration for the BLIP-2 vision encoder."""
    model_type = "blip_2_vision_model"
    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    """Configuration for the BLIP-2 Q-Former that bridges the vision encoder and the language model."""
    model_type = "blip_2_qformer"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    """Composite configuration tying together the vision, Q-Former and language-model configs."""
    model_type = "blip-2"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Instantiate a Blip2Config from the three sub-configurations."""
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
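# Hedged usage sketch (editor's addition, not part of the original module):
# composing a Blip2Config from default sub-configs via the classmethod above.
# CONFIG_MAPPING["opt"] is already imported at the top of this file.
if __name__ == "__main__":
    config = Blip2Config.from_vision_qformer_text_configs(
        Blip2VisionConfig(), Blip2QFormerConfig(), CONFIG_MAPPING["opt"]()
    )
    print(config.num_query_tokens, config.qformer_config.encoder_hidden_size)  # 32 1408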
| 58
| 0
|
'''simple docstring'''
from math import ceil
def assert_device_map(device_map, num_blocks):
    '''Validate that a device_map assigns every attention block to exactly one device.'''
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks) )
def get_device_map(n_layers, devices):
    '''Evenly split ``n_layers`` layer indices across ``devices``.'''
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
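# Hedged usage sketch (editor's addition): splitting 10 layers across two
# devices, then validating the result with the checker above.
if __name__ == "__main__":
    device_map = get_device_map(10, [0, 1])
    print(device_map)  # {0: [0, 1, 2, 3, 4], 1: [5, 6, 7, 8, 9]}
    assert_device_map(device_map, 10)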
| 714
|
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message):
    '''Translate a plaintext message to Morse code.'''
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message):
    '''Translate a Morse-code message back to plaintext.'''
    return "".join(REVERSE_DICT[char] for char in message.split())
def main():
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
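    # Editor's round-trip check (an addition, not in the original): decrypting
    # an encrypted message should reproduce the upper-cased input.
    assert decrypt(encrypt("SOS")) == "SOS"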
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    '''Aho-Corasick automaton for multi-pattern string matching.'''
    def __init__(self, keywords):
        self.adlist = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state(self, current_state, char):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword(self, keyword):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions(self):
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in(self, string):
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
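    # Hedged usage sketch (editor's addition): matching several overlapping
    # keywords in a single pass; values are the start indices of each hit.
    auto = Automaton(["what", "hat", "ver", "er"])
    print(auto.search_in("whatever, err ... , wherever"))
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}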
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    """Configuration for Data2VecText models (a RoBERTa-style text encoder)."""
    model_type = "data2vec-text"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
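# Hedged usage sketch (editor's addition, not from the original module): the
# ONNX input spec above maps each input tensor to its dynamic axes; the extra
# "choice" axis only appears for the multiple-choice task.
if __name__ == "__main__":
    onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig(), task="multiple-choice")
    print(onnx_config.inputs)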
| 58
| 0
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """Token-level NER task over CoNLL-style files (one token and label per line)."""
    def __init__(self, label_idx=-1):
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F'''{mode}-{guid_index}''', words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ')
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace('\n', ''))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O')
            if words:
                examples.append(InputExample(guid=F'''{mode}-{guid_index}''', words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-') or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n'
                writer.write(output_line)
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.', line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """Chunking task: the label lives in the second-to-last column of the file."""
    def __init__(self):
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, 'r') as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    """Part-of-speech tagging over CoNLL-U files."""
    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F'''{mode}.txt''')
        guid_index = 1
        examples = []
        with open(file_path, encoding='utf-8') as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'])
                    labels.append(token['upos'])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=F'''{mode}-{guid_index}''', words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += F'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, 'r') as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
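# Hedged usage sketch (editor's addition): get_labels(None) falls back to the
# built-in tag sets, so the tasks can be exercised without any data files.
if __name__ == "__main__":
    print(NER().get_labels(None))  # default CoNLL-2003 BIO labels
    print(POS().get_labels(None))  # universal POS tags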
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast smoke tests for the base IF text-to-image pipeline."""
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2, )
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    """End-to-end tests running all three IF cascades on real checkpoints."""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda')
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('anime turtle', device='cuda')
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    '''Reset CUDA memory stats so peak usage can be measured per pipeline stage.'''
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
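# Hedged usage sketch (editor's addition): how the helper above brackets a
# block of work so torch.cuda.max_memory_allocated() reports its peak VRAM,
# mirroring the mem_bytes checks in the tests.
if __name__ == "__main__" and torch.cuda.is_available():
    _start_torch_memory_measurement()
    x = torch.randn(1024, 1024, device="cuda") @ torch.randn(1024, 1024, device="cuda")
    print(f"peak VRAM: {torch.cuda.max_memory_allocated() / 2**20:.1f} MiB")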
| 58
| 0
|