code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def snake_case_ ( n, k ) -> int:
    """Return the binomial coefficient C(n, k).

    Uses the multiplicative formula; every intermediate division is exact
    because ``result`` always holds C(n, i) before step ``i``.
    """
    result = 1  # accumulates C(n, k)
    # Since C(n, k) = C(n, n-k), iterate over the smaller of the two.
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def snake_case_ ( node_count ) -> int:
    """Return the ``node_count``-th Catalan number, C(2n, n) // (n + 1).

    This counts, among other things, the number of binary search trees that
    can be built from ``node_count`` distinct keys.
    """
    # Build C(2n, n) incrementally; result holds C(2n, i) before step i, so
    # each interleaved division is exact.
    result = 1
    for i in range(node_count):
        result = result * (2 * node_count - i) // (i + 1)
    return result // (node_count + 1)
def snake_case_ ( n ) -> int:
    """Return n! computed iteratively.

    Raises:
        ValueError: if ``n`` is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def snake_case_ ( node_count ) -> int:
    """Return the number of binary trees with ``node_count`` labeled nodes.

    Equals Catalan(node_count) * node_count!: each of the Catalan(n) binary
    tree shapes can be labeled in n! distinct ways.
    """
    # Local import keeps the module's top-level dependencies unchanged.
    from math import comb, factorial

    return comb(2 * node_count, node_count) // (node_count + 1) * factorial(node_count)
if __name__ == "__main__":
    # Read the node count; empty input counts as 0 and is rejected below.
    node_count = int(input('''Enter the number of nodes: ''').strip() or 0)
    if node_count <= 0:
        raise ValueError('''We need some nodes to work with.''')
    # NOTE(review): `binary_tree_count` / `catalan_number` are the intended
    # helper names; in this (name-mangled) file the helpers are all defined as
    # `snake_case_` — confirm against the upstream source.
    print(
        f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
        f'binary trees and {catalan_number(node_count)} binary search trees.'
    )
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the CpmAnt model package: configuration and
# tokenizer are always importable; the modeling module is only registered
# when torch is installed.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling classes as well.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader so the heavy imports
    # only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def A_ ( nums: list[int] ) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``.

    Classic "house robber" dynamic program in O(n) time / O(1) space;
    an empty list yields 0.
    """
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0        # best sum that excludes the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
# Emit INFO-level progress messages during checkpoint conversion.
logging.set_verbosity_info()
# Module-level logger (variable name mangled by the dataset export;
# presumably "logger" upstream).
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( checkpoint_path ):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict.

    Drops obsolete keys, renames project-in/out and final layer-norm weights
    to the HF naming scheme, and splits fused ``qkv_proj`` tensors into
    separate q/k/v projection weights.

    Args:
        checkpoint_path: path of the ``.pt`` checkpoint to load.

    Returns:
        dict: the normalized state dict.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        # Some checkpoints nest the weights under a "model" key; reuse the
        # already-loaded object instead of reloading the file.
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def A_ ( checkpoint_path, pytorch_dump_folder_path, config=None ):
    """Convert a metaseq OPT checkpoint into a Hugging Face ``OPTModel`` folder.

    Args:
        checkpoint_path: source fairseq/metaseq checkpoint file.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional path/name of an ``OPTConfig`` to load; a default
            ``OPTConfig()`` is used when omitted.
    """
    # NOTE(review): the mangled source also called `load_checkpoint`, which is
    # not defined under that name in this file — confirm the loader's name.
    state_dict = A_ if False else load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    # Upstream checkpoints store half-precision weights.
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    # NOTE(review): `convert_opt_checkpoint` is the intended conversion entry
    # point; in this (name-mangled) file the converter is defined as `A_` —
    # confirm against the upstream script.
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module logger (variable names below were mangled by the dataset export).
_snake_case : int = logging.get_logger(__name__)
# Map of pretrained VAN checkpoint names to their hosted config files.
_snake_case : str = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class a (PretrainedConfig ):
    """Configuration holding the hyper-parameters of a VAN model.

    Extra keyword arguments are forwarded to ``PretrainedConfig``.
    NOTE(review): the base class was mangled in the source; ``PretrainedConfig``
    is imported at the top of this file and otherwise unused, so it is the
    grounded choice. The class attribute below plays the role of HF's
    ``model_type`` marker (its original name was mangled too).
    """

    __UpperCAmelCase : List[Any] = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1E-6,
        layer_scale_init_value=1E-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ) -> None:
        """Store every hyper-parameter on ``self``; extra kwargs go to the base."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for PoolFormer: configuration is always importable;
# image-processing classes require the vision extras and modeling requires
# torch.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Install a lazy module so heavy dependencies load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 351 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy import structure for Ernie: configuration is always importable; the
# modeling classes are only registered when torch is installed.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Install a lazy module so torch/modeling import only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 288 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """Tests for ``BarkProcessor``: tokenizer round-trips and voice presets.

    NOTE(review): every method name was mangled to ``__snake_case`` by the
    dataset export, so each later definition shadows the earlier ones; the
    first method presumably was ``setUp`` and its locals presumably were
    ``self`` attributes (``self.checkpoint`` etc.) — confirm upstream.
    """

    def __snake_case ( self : Optional[int] ):
        # Presumable setUp fixtures; as written these are plain locals, while
        # the other methods read self.checkpoint / self.tmpdirname / etc.
        lowerCAmelCase__ = '''ylacombe/bark-small'''
        lowerCAmelCase__ = tempfile.mkdtemp()
        lowerCAmelCase__ = '''en_speaker_1'''
        lowerCAmelCase__ = '''This is a test string'''
        lowerCAmelCase__ = '''speaker_embeddings_path.json'''
        lowerCAmelCase__ = '''speaker_embeddings'''

    def __snake_case ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
        # Tokenizer factory: loads from the checkpoint with optional overrides.
        return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : List[str] ):
        # Presumable tearDown: drop the temporary directory.
        shutil.rmtree(self.tmpdirname )

    def __snake_case ( self : Optional[Any] ):
        # Save/load round-trip: vocab must survive save_pretrained/from_pretrained.
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def __snake_case ( self : Any ):
        # Round-trip including speaker-embedding paths and added special tokens.
        lowerCAmelCase__ = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowerCAmelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        lowerCAmelCase__ = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def __snake_case ( self : Dict ):
        # Voice presets accepted as in-memory dict, as an .npz file, and by hub name.
        lowerCAmelCase__ = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowerCAmelCase__ = 35
        lowerCAmelCase__ = 2
        lowerCAmelCase__ = 8
        lowerCAmelCase__ = {
            '''semantic_prompt''': np.ones(SCREAMING_SNAKE_CASE_ ),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowerCAmelCase__ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowerCAmelCase__ = os.path.join(self.tmpdirname , '''file.npz''' )
        np.savez(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = processor(text=self.input_string , voice_preset=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(SCREAMING_SNAKE_CASE_ , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowerCAmelCase__ = processor(text=self.input_string , voice_preset=self.voice_preset )

    def __snake_case ( self : Optional[int] ):
        # Processor output must match the raw tokenizer's padded encoding.
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = BarkProcessor(tokenizer=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = processor(text=self.input_string )
        lowerCAmelCase__ = tokenizer(
            self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 288 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
    """Test harness that builds a ``LiltConfig`` plus dummy inputs and runs
    shape checks on the LiLT model heads.

    NOTE(review): the dataset export mangled every ``__init__`` parameter to
    the same name ``_UpperCamelCase`` (a SyntaxError as written) and all local
    names; the intended parameters are the attributes assigned in the body
    (batch_size, seq_length, ...), and ``lowercase__`` presumably refers to
    the config/inputs arguments — confirm against the upstream test file.
    """

    def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=2_4 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1_0_0_0 , ) -> List[str]:
        # Store every hyper-parameter used to build configs and dummy tensors.
        UpperCAmelCase_ : int = parent
        UpperCAmelCase_ : Optional[int] = batch_size
        UpperCAmelCase_ : Any = seq_length
        UpperCAmelCase_ : Dict = is_training
        UpperCAmelCase_ : str = use_input_mask
        UpperCAmelCase_ : Dict = use_token_type_ids
        UpperCAmelCase_ : Tuple = use_labels
        UpperCAmelCase_ : Union[str, Any] = vocab_size
        UpperCAmelCase_ : Tuple = hidden_size
        UpperCAmelCase_ : Dict = num_hidden_layers
        UpperCAmelCase_ : Optional[Any] = num_attention_heads
        UpperCAmelCase_ : Union[str, Any] = intermediate_size
        UpperCAmelCase_ : Optional[int] = hidden_act
        UpperCAmelCase_ : int = hidden_dropout_prob
        UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
        UpperCAmelCase_ : List[Any] = max_position_embeddings
        UpperCAmelCase_ : Optional[Any] = type_vocab_size
        UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
        UpperCAmelCase_ : str = initializer_range
        UpperCAmelCase_ : str = num_labels
        UpperCAmelCase_ : str = scope
        UpperCAmelCase_ : Union[str, Any] = range_bbox

    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        # Build random input_ids, valid bounding boxes, masks and labels.
        UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so x1 <= x2 and y1 <= y2 (temp names mangled).
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    UpperCAmelCase_ : Optional[int] = bbox[i, j, 3]
                    UpperCAmelCase_ : str = bbox[i, j, 1]
                    UpperCAmelCase_ : Union[str, Any] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    UpperCAmelCase_ : Optional[int] = bbox[i, j, 2]
                    UpperCAmelCase_ : int = bbox[i, j, 0]
                    UpperCAmelCase_ : Dict = t
        UpperCAmelCase_ : Tuple = None
        if self.use_input_mask:
            UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        UpperCAmelCase_ : List[Any] = None
        if self.use_token_type_ids:
            UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase_ : List[str] = None
        UpperCAmelCase_ : Union[str, Any] = None
        if self.use_labels:
            UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        UpperCAmelCase_ : int = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def __UpperCAmelCase ( self ) -> Dict:
        # Build a LiltConfig from the stored hyper-parameters.
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Dict:
        # Base-model forward pass: checks last_hidden_state and pooler shapes.
        UpperCAmelCase_ : List[Any] = LiltModel(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        UpperCAmelCase_ : List[Any] = model(lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
        UpperCAmelCase_ : Optional[int] = model(lowercase__ , bbox=lowercase__ , token_type_ids=lowercase__ )
        UpperCAmelCase_ : Optional[int] = model(lowercase__ , bbox=lowercase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Dict:
        # Token-classification head: logits shaped (batch, seq, num_labels).
        UpperCAmelCase_ : Optional[int] = self.num_labels
        UpperCAmelCase_ : Dict = LiltForTokenClassification(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        UpperCAmelCase_ : Dict = model(
            lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Tuple:
        # QA head: start/end logits shaped (batch, seq).
        UpperCAmelCase_ : Dict = LiltForQuestionAnswering(config=lowercase__ )
        model.to(lowercase__ )
        model.eval()
        UpperCAmelCase_ : List[Any] = model(
            lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCAmelCase ( self ) -> Dict:
        # Pack the generated inputs into the kwargs dict the common tests use.
        UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
        (
            UpperCAmelCase_
        ) : Any = config_and_inputs
        UpperCAmelCase_ : int = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Model/pipeline test suite for LiLT.

    NOTE(review): the three mixin base classes were mangled to
    ``_UpperCamelCase``; presumably ModelTesterMixin, GenerationTesterMixin
    and PipelineTesterMixin (imported above) — confirm upstream.
    """

    # Model classes exercised by the common tests (torch-gated).
    _snake_case : str = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task to model-class mapping (torch-gated).
    _snake_case : str = (
        {
            '''feature-extraction''': LiltModel,
            '''question-answering''': LiltForQuestionAnswering,
            '''text-classification''': LiltForSequenceClassification,
            '''token-classification''': LiltForTokenClassification,
            '''zero-shot''': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _snake_case : Any = False
    _snake_case : List[Any] = False

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
        # Pipeline-test filter hook: run every pipeline test.
        return True

    def __UpperCAmelCase ( self ) -> Optional[int]:
        # Presumable setUp: build the model tester and config tester.
        UpperCAmelCase_ : Tuple = LiltModelTester(self )
        UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7 )

    def __UpperCAmelCase ( self ) -> List[str]:
        self.config_tester.run_common_tests()

    def __UpperCAmelCase ( self ) -> int:
        UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )

    def __UpperCAmelCase ( self ) -> Any:
        # Re-run the model check for every position-embedding variant.
        UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase_ : Any = type
            self.model_tester.create_and_check_model(*lowercase__ )

    def __UpperCAmelCase ( self ) -> Optional[Any]:
        UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase__ )

    def __UpperCAmelCase ( self ) -> int:
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase__ )

    @slow
    def __UpperCAmelCase ( self ) -> str:
        # Smoke-test loading the first published checkpoint.
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Optional[Any] = LiltModel.from_pretrained(lowercase__ )
            self.assertIsNotNone(lowercase__ )
@require_torch
@slow
class lowerCamelCase (unittest.TestCase ):
    """Integration test: run the pretrained LiLT base model on a tiny input
    and compare the first logits against hard-coded reference values.

    NOTE(review): ``lowercase__`` is a mangled name — presumably
    ``torch_device`` (device) and the various locals; also
    ``assertTrue(a, b)`` below looks like it was meant to be
    ``assertEqual`` — confirm upstream.
    """

    def __UpperCAmelCase ( self ) -> Tuple:
        UpperCAmelCase_ : Any = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase__ )
        UpperCAmelCase_ : str = torch.tensor([[1, 2]] , device=lowercase__ )
        UpperCAmelCase_ : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase__ )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : Dict = model(input_ids=lowercase__ , bbox=lowercase__ )
        UpperCAmelCase_ : List[Any] = torch.Size([1, 2, 7_6_8] )
        UpperCAmelCase_ : Union[str, Any] = torch.tensor(
            [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase__ , )
        self.assertTrue(outputs.last_hidden_state.shape , lowercase__ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase__ , atol=1E-3 ) )
| 406 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowerCamelCase :
    """Test harness that builds a hybrid ``DPTConfig`` plus dummy pixel inputs
    and runs shape checks on the DPT model heads.

    NOTE(review): all ``__init__`` parameters were mangled to the same name
    ``lowercase__`` (a SyntaxError as written); the intended parameters are
    the attributes assigned in the body — confirm against the upstream file.
    """

    def __init__( self , lowercase__ , lowercase__=2 , lowercase__=3_2 , lowercase__=1_6 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=3_2 , lowercase__=4 , lowercase__=[0, 1, 2, 3] , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=[1, 3_8_4, 2_4, 2_4] , lowercase__=True , lowercase__=None , ):
        # Store every hyper-parameter used to build configs and dummy tensors.
        __UpperCAmelCase : Any = parent
        __UpperCAmelCase : Tuple = batch_size
        __UpperCAmelCase : Optional[int] = image_size
        __UpperCAmelCase : Tuple = patch_size
        __UpperCAmelCase : int = num_channels
        __UpperCAmelCase : Optional[Any] = is_training
        __UpperCAmelCase : Union[str, Any] = use_labels
        __UpperCAmelCase : List[Any] = hidden_size
        __UpperCAmelCase : Optional[Any] = num_hidden_layers
        __UpperCAmelCase : Any = backbone_out_indices
        __UpperCAmelCase : Union[str, Any] = num_attention_heads
        __UpperCAmelCase : Dict = intermediate_size
        __UpperCAmelCase : Dict = hidden_act
        __UpperCAmelCase : Dict = hidden_dropout_prob
        __UpperCAmelCase : List[Any] = attention_probs_dropout_prob
        __UpperCAmelCase : List[Any] = initializer_range
        __UpperCAmelCase : Union[str, Any] = num_labels
        __UpperCAmelCase : List[Any] = backbone_featmap_shape
        __UpperCAmelCase : Any = scope
        __UpperCAmelCase : Optional[int] = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        __UpperCAmelCase : Optional[Any] = (image_size // patch_size) ** 2
        __UpperCAmelCase : Any = num_patches + 1

    def A( self):
        # Build random pixel values (and segmentation labels when enabled).
        __UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __UpperCAmelCase : List[str] = None
        if self.use_labels:
            __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
        __UpperCAmelCase : Dict = self.get_config()
        return config, pixel_values, labels

    def A( self):
        # Build a hybrid DPTConfig with a small BiT-style backbone config.
        __UpperCAmelCase : Dict = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [9_6, 1_9_2, 3_8_4, 7_6_8],
            '''num_groups''': 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase__ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=lowercase__ , backbone_featmap_shape=self.backbone_featmap_shape , )

    def A( self , lowercase__ , lowercase__ , lowercase__):
        # Base-model forward pass: checks last_hidden_state shape.
        __UpperCAmelCase : List[str] = DPTModel(config=lowercase__)
        model.to(lowercase__)
        model.eval()
        __UpperCAmelCase : Dict = model(lowercase__)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def A( self , lowercase__ , lowercase__ , lowercase__):
        # Depth-estimation head: predicted depth shaped (batch, H, W).
        __UpperCAmelCase : List[str] = self.num_labels
        __UpperCAmelCase : Optional[Any] = DPTForDepthEstimation(lowercase__)
        model.to(lowercase__)
        model.eval()
        __UpperCAmelCase : str = model(lowercase__)
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size))

    def A( self , lowercase__ , lowercase__ , lowercase__):
        # Semantic-segmentation head: logits shaped (batch, num_labels, H, W).
        __UpperCAmelCase : Tuple = self.num_labels
        __UpperCAmelCase : Optional[int] = DPTForSemanticSegmentation(lowercase__)
        model.to(lowercase__)
        model.eval()
        __UpperCAmelCase : str = model(lowercase__ , labels=lowercase__)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def A( self):
        # Pack the generated inputs into the kwargs dict the common tests use.
        __UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
        __UpperCAmelCase : Union[str, Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_lowerCAmelCase : Optional[int] = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : Any = False
_lowerCAmelCase : str = False
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : Any = DPTModelTester(self)
__UpperCAmelCase : str = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''')
def A( self):
pass
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(lowercase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class(lowercase__)
__UpperCAmelCase : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[str] = [*signature.parameters.keys()]
__UpperCAmelCase : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*lowercase__)
def A( self):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase__)
def A( self):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = True
if model_class in get_values(lowercase__):
continue
__UpperCAmelCase : List[Any] = model_class(lowercase__)
model.to(lowercase__)
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
__UpperCAmelCase : Any = model(**lowercase__).loss
loss.backward()
    def A( self):
        """Training smoke test with gradient checkpointing enabled, for model
        classes that support it (DPTForDepthEstimation excluded).

        NOTE(review): same defect pattern as the plain training test --
        `lowercase__`, `model` and `loss` are undefined in this scope.
        """
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
            __UpperCAmelCase : Optional[int] = False
            __UpperCAmelCase : str = True
            if model_class in get_values(lowercase__) or not model_class.supports_gradient_checkpointing:
                continue
            __UpperCAmelCase : Tuple = model_class(lowercase__)
            model.to(lowercase__)
            model.gradient_checkpointing_enable()
            model.train()
            __UpperCAmelCase : Tuple = self._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__)
            __UpperCAmelCase : Union[str, Any] = model(**lowercase__).loss
            loss.backward()
    def A( self):
        """With a zero-init config, every trainable parameter (outside the
        hybrid backbone) should have mean 0.0 or 1.0.

        NOTE(review): `lowercase__`, `model` and `backbone_params` are undefined
        in this scope -- assignments land in throwaway names, so the skip list
        built for DPTViTHybridEmbeddings is never consulted. TODO repair.
        """
        __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : List[Any] = _config_zero_init(lowercase__)
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Tuple = model_class(config=lowercase__)
            # Skip the check for the backbone
            __UpperCAmelCase : List[Any] = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    __UpperCAmelCase : Optional[Any] = [F"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    # Rounding to 9 decimals tolerates float noise around 0/1.
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
    # Skipped: see the reason string -- waiting on a smaller common-test model.
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def A( self):
        pass
    @slow
    def A( self):
        """Loading each published checkpoint (after the first) should succeed.

        NOTE(review): `lowercase__` is undefined -- the loop variable is
        `model_name` and the loaded model lands in a throwaway name.
        """
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            __UpperCAmelCase : Optional[int] = DPTModel.from_pretrained(lowercase__)
            self.assertIsNotNone(lowercase__)
    def A( self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        # NOTE(review): `lowercase__` is undefined in this scope; presumably the
        # config should receive readout_type='add' and assertRaises should
        # expect ValueError. TODO confirm against the upstream test.
        __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : List[str] = '''add'''
        with self.assertRaises(lowercase__):
            __UpperCAmelCase : Optional[Any] = DPTForDepthEstimation(lowercase__)
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
    """Load the shared COCO sample image fixture used by the slow tests."""
    # Relative path: assumes the test runner's working directory is the repo root.
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_torch
@require_vision
@slow
class lowerCamelCase ( unittest.TestCase ):
    """Slow integration test for DPT hybrid depth estimation.

    NOTE(review): `lowercase__`, `prepare_img`, `image_processor`, `model`,
    `outputs` and `predicted_depth` are undefined in this scope -- every
    assignment below lands in a throwaway name, and the image helper defined
    above in this file carries a different (garbled) identifier. The method
    cannot run as written; TODO repair against the upstream test.
    """

    def A( self):
        __UpperCAmelCase : str = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''')
        __UpperCAmelCase : str = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''').to(lowercase__)
        __UpperCAmelCase : List[str] = prepare_img()
        __UpperCAmelCase : Tuple = image_processor(images=lowercase__ , return_tensors='''pt''').to(lowercase__)
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase : List[Any] = model(**lowercase__)
        __UpperCAmelCase : str = outputs.predicted_depth
        # verify the predicted depth
        __UpperCAmelCase : Union[str, Any] = torch.Size((1, 3_8_4, 3_8_4))
        self.assertEqual(predicted_depth.shape , lowercase__)
        __UpperCAmelCase : List[str] = torch.tensor(
            [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]]).to(lowercase__)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , lowercase__ , atol=1e-4))
| 462 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__ ( ProcessorMixin ):
    """CLAP processor: bundles a ClapFeatureExtractor (audio) and a Roberta
    tokenizer (text) behind a single ``__call__``.

    Fixes over the original: the base class and both class attributes carried
    garbled names, ``__init__``/``__call__``/decode helpers declared duplicate
    parameter names (a SyntaxError), and the computed audio features were
    never merged into the returned encoding.
    """

    # Attribute names ProcessorMixin inspects to resolve the component classes.
    feature_extractor_class = """ClapFeatureExtractor"""
    tokenizer_class = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

    def __init__( self , feature_extractor , tokenizer ):
        """Register the two components with ProcessorMixin."""
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or featurize ``audios``.

        Returns the text encoding (with ``input_features`` attached when audio
        is also given), or a BatchEncoding of the audio features alone.
        Raises ValueError when neither input is provided.
        """
        sampling_rate = kwargs.pop('sampling_rate' , None )

        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )

        if text is not None and audios is not None:
            # Merge the audio features into the text encoding; the original
            # computed them and then returned the bare text encoding.
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Tokenizer + feature-extractor input names, de-duplicated in order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )

    # Backward-compatible alias: the only attribute that survived the original
    # file's name collisions was a property bound to ``A__``.
    A__ = model_input_names
| 433 |
from itertools import count
def A ( SCREAMING_SNAKE_CASE = 50 ):
    """Project Euler #115: return the least row length ``n`` for which the
    fill-count function with minimum block length ``SCREAMING_SNAKE_CASE``
    first exceeds one million.
    """
    min_length = SCREAMING_SNAKE_CASE
    # ways[k] holds the fill count for a row of k cells; rows shorter than the
    # minimum block length have exactly one arrangement.
    ways = [1] * min_length
    for n in count(min_length ):
        row_count = 1  # base arrangement
        for block_length in range(min_length , n + 1 ):
            # One unit per block length plus the counts contributed by the
            # remainder of the row for every feasible start position.
            row_count += 1 + sum(
                ways[n - block_start - block_length - 1]
                for block_start in range(n - block_length )
            )
        ways.append(row_count )
        if row_count > 1000000:
            return n
if __name__ == "__main__":
    # Fix: the original printed `solution()`, a name that does not exist in
    # this module -- the solver above is bound to `A`.
    print(f"""{A() = }""")
| 433 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( ) -> Dict:
    """Build a tiny three-row dataset fixture for the deduplication tests.

    Rows 0 and 1 share long runs of "a " and are intended to cluster as
    near-duplicates; row 2 is distinct.
    """
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    # Fix: the original passed an undefined name to Dataset.from_dict.
    dataset = Dataset.from_dict(data )
    return dataset
class SCREAMING_SNAKE_CASE__ ( TestCase ):
    """Tests for minhash-based near-duplicate clustering and removal.

    Fixes over the original: the base class name was garbled (TestCase is
    imported at the top of this module), and every call site read the
    undefined name ``a_`` instead of the locally built dataset.
    NOTE(review): ``get_dataset`` is the intended fixture, but the fixture
    defined above carries a garbled module-level name -- confirm binding.
    """

    def A ( self ):
        """Clustering the fixture at 0.85 similarity groups the two
        near-duplicate rows into one cluster of size two."""
        dataset = get_dataset()
        duplicate_clusters = make_duplicate_clusters(dataset , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def A ( self ):
        """Deduplication keeps two of the three rows and marks the retained
        duplicate as the extreme (representative) element."""
        dataset = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(dataset )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True )
| 69 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece fixture (includes byte-fallback pieces).
a : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
    """GPT-SW3 tokenizer tests.

    NOTE(review): this class is heavily garbled -- the base mixin `_UpperCamelCase`
    is undefined, all four class attributes share one name (only the last
    assignment survives; upstream these are tokenizer_class,
    test_rust_tokenizer, test_sentencepiece, test_sentencepiece_ignore_case),
    and method bodies read names (`a_`, `tokenizer`, `vocab_keys`,
    `input_text`, `output_text`) that are undefined in their scope because the
    real assignments land in throwaway `__snake_case` locals. TODO repair
    against the upstream test module.
    """

    __SCREAMING_SNAKE_CASE = GPTSwaTokenizer
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = True
    __SCREAMING_SNAKE_CASE = False

    def A ( self : int ):
        """Persist a tokenizer built from the SentencePiece fixture to tmpdir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        __snake_case = GPTSwaTokenizer(a_ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )

        tokenizer.save_pretrained(self.tmpdirname )

    def A ( self : str , a_ : List[Any] ):
        """Return an (input, output) text pair for round-trip tests."""
        __snake_case = "This is a test"
        __snake_case = "This is a test"
        return input_text, output_text

    def A ( self : Union[str, Any] ):
        """Token <-> id conversion for the <s> token (id 1)."""
        __snake_case = "<s>"
        __snake_case = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ )

    def A ( self : Tuple ):
        """Spot-check the first, second and last vocab entries and the size."""
        __snake_case = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(a_ ) , 2_000 )

    def A ( self : Optional[int] ):
        """The fixture vocabulary holds exactly 2000 pieces."""
        self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )

    def A ( self : Dict ):
        """Full tokenization round trip including byte-fallback pieces."""
        __snake_case = GPTSwaTokenizer(a_ )

        __snake_case = tokenizer.tokenize("This is a test" )
        self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [465, 287, 265, 631, 842] )

        __snake_case = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        # fmt: off
        self.assertListEqual(
            a_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
        # fmt: on
        __snake_case = tokenizer.convert_tokens_to_ids(a_ )
        self.assertListEqual(
            a_ , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )

        __snake_case = tokenizer.convert_ids_to_tokens(a_ )
        # fmt: off
        self.assertListEqual(
            a_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on

    def A ( self : List[str] ):
        """encode_fast/decode_fast agree with the slow tokenize + convert path."""
        __snake_case = GPTSwaTokenizer(a_ )
        __snake_case = ["This is a test", "I was born in 92000, and this is falsé."]
        __snake_case = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(a_ , a_ ):
            self.assertListEqual(tokenizer.encode_fast(a_ ) , a_ )

        # Test that decode_fast returns the input text
        for text, token_ids in zip(a_ , a_ ):
            self.assertEqual(tokenizer.decode_fast(a_ ) , a_ )

    @slow
    def A ( self : Any ):
        """Integration check of the published gpt-sw3-126m checkpoint against
        a frozen expected encoding."""
        __snake_case = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        __snake_case = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=a_ , )
| 69 | 1 |
class _lowerCAmelCase :
    """A binary-search-tree node; ``insert`` keeps the subtree ordered."""

    def __init__( self , val ):
        # Value held at this node; both children start empty.
        # Fix: the original assigned to throwaway locals, leaving instances
        # without any attributes.
        self.val = val
        self.left = None
        self.right = None

    def insert( self , val ):
        """Insert ``val`` below this node (equal values overwrite in place).

        Fixes over the original: children were assigned to throwaway locals,
        recursion went through a nonexistent ``insert`` attribute (the method
        carried a name-mangled identifier), and a truthiness guard silently
        skipped insertion whenever the current value was falsy (e.g. 0).
        """
        if val < self.val:
            if self.left is None:
                self.left = _lowerCAmelCase(val )
            else:
                self.left.insert(val )
        elif val > self.val:
            if self.right is None:
                self.right = _lowerCAmelCase(val )
            else:
                self.right.insert(val )
        else:
            self.val = val

    # Backward-compatible alias for the original (name-mangled) method name.
    __lowerCAmelCase = insert
def __lowerCamelCase ( root , res ):
    """Append the values of the BST rooted at ``root`` to ``res`` in sorted
    (in-order) order.

    Fixes over the original: its two parameters shared one name (a
    SyntaxError), and its recursive call targeted a module-level name that
    later definitions shadow -- recursion now goes through a private inner
    helper so the function stands alone.
    """
    def _walk(node):
        if node:
            _walk(node.left )
            res.append(node.val )
            _walk(node.right )

    _walk(root )
def __lowerCamelCase ( arr ):
    """Sort ``arr`` by inserting every element into a BST and reading the
    tree back in order; an empty input is returned unchanged.

    Fixes over the original, which referenced an undefined ``Node`` class and
    an ``inorder`` helper whose module-level name is shadowed: the node class
    actually defined in this file is used, and the traversal is inlined.
    NOTE(review): requires the node class to expose a working ``insert``.
    """
    if len(arr ) == 0:
        return arr
    root = _lowerCAmelCase(arr[0] )
    for value in arr[1:]:
        root.insert(value )
    # Iterative in-order traversal (no dependence on shadowed helpers).
    ordered = []
    stack , node = [] , root
    while stack or node is not None:
        while node is not None:
            stack.append(node )
            node = node.left
        node = stack.pop()
        ordered.append(node.val )
        node = node.right
    return ordered
if __name__ == "__main__":
    # Fix: `tree_sort` does not exist in this module -- at this point of module
    # execution the sorter above is the latest binding of `__lowerCamelCase`.
    print(__lowerCamelCase([10, 1, 3, 2, 9, 14, 13]))
| 710 |
def __lowerCamelCase ( _lowercase ) -> list[int]:
    """Return the Z-array of ``_lowercase``: z[i] is the length of the longest
    substring starting at i that is also a prefix of the whole string.

    Fix: the original delegated its extension test to a sibling helper through
    a name that is undefined/shadowed at module level (and passed it the wrong
    arguments); the condition is now inlined so the function stands alone.
    """
    z_result = [0 for _ in range(len(_lowercase ) )]
    # Current Z-box [left_pointer, right_pointer] -- the rightmost interval
    # known to match a prefix.
    left_pointer , right_pointer = 0 , 0
    for i in range(1 , len(_lowercase ) ):
        # Reuse previously computed information when i lies inside the box.
        if i <= right_pointer:
            z_result[i] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
        # Extend the match character by character.
        while (i + z_result[i] < len(_lowercase )
               and _lowercase[z_result[i]] == _lowercase[i + z_result[i]]):
            z_result[i] += 1
        # Grow the Z-box when this match reaches past it.
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i , i + z_result[i] - 1
    return z_result
def __lowerCamelCase ( i , z_result , s ) -> bool:
    """True while the prefix match at position ``i`` of string ``s`` can be
    extended by one more character.

    Fix: the original declared three parameters with the same name, which is a
    SyntaxError; they are renamed to the identifiers the body implies.
    """
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def __lowerCamelCase ( pattern , input_str ) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` with the Z-algorithm.

    Fixes over the original: its two parameters shared one name (a
    SyntaxError) and it called ``z_function``, which does not exist at module
    level -- the Z-array is now computed by a private inner helper so the
    function stands alone.
    """
    def _z(s):
        # Standard Z-array computation over s.
        z = [0] * len(s )
        left , right = 0 , 0
        for i in range(1 , len(s ) ):
            if i <= right:
                z[i] = min(right - i + 1 , z[i - left] )
            while i + z[i] < len(s ) and s[z[i]] == s[i + z[i]]:
                z[i] += 1
            if i + z[i] - 1 > right:
                left , right = i , i + z[i] - 1
        return z

    answer = 0
    # Any Z-value of the concatenation at least as long as the pattern marks
    # a position whose substring equals the pattern.
    for val in _z(pattern + input_str ):
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 170 | 0 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __UpperCAmelCase ( lowerCamelCase_ ):
    """Derive the (encoder, decoder) HF configs from an original Donut model.

    Fix: the original passed the model object itself wherever a boolean flag
    was expected (is_decoder, is_encoder_decoder, add_cross_attention,
    scale_embedding, add_final_layer_norm); the flags are restored to the
    values the conversion requires.
    """
    original_config = lowerCamelCase_.config

    # Swin encoder mirroring the original vision backbone's geometry.
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=1_28 , )
    # MBart decoder running as a causal LM with cross-attention to the encoder.
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            lowerCamelCase_.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )

    return encoder_config, decoder_config
def __UpperCAmelCase ( lowerCamelCase_ ):
    """Translate one original-Donut state-dict key into its HF equivalent.

    Fix: the original read an undefined name ``name`` and dropped every
    ``replace`` result into throwaway locals, so no renaming ever happened;
    each step now rebinds ``name``.
    """
    name = lowerCamelCase_
    # Strip the intermediate ``model`` level on both towers.
    if "encoder.model" in name:
        name = name.replace('encoder.model' , 'encoder' )
    if "decoder.model" in name:
        name = name.replace('decoder.model' , 'decoder' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if name.startswith('encoder' ):
        if "layers" in name:
            name = 'encoder.' + name
        if "attn.proj" in name:
            name = name.replace('attn.proj' , 'attention.output.dense' )
        # "mask" keys are buffers, not attention projections -- leave them.
        if "attn" in name and "mask" not in name:
            name = name.replace('attn' , 'attention.self' )
        if "norm1" in name:
            name = name.replace('norm1' , 'layernorm_before' )
        if "norm2" in name:
            name = name.replace('norm2' , 'layernorm_after' )
        if "mlp.fc1" in name:
            name = name.replace('mlp.fc1' , 'intermediate.dense' )
        if "mlp.fc2" in name:
            name = name.replace('mlp.fc2' , 'output.dense' )
        if name == "encoder.norm.weight":
            name = 'encoder.layernorm.weight'
        if name == "encoder.norm.bias":
            name = 'encoder.layernorm.bias'
    return name
def __UpperCAmelCase ( orig_state_dict , model ):
    """Rewrite an original Donut state dict in place to HF naming and layout.

    Fixes over the original: duplicated parameter names (a SyntaxError), the
    split q/k/v tensors were assigned to throwaway locals instead of new
    state-dict keys, and renamed keys were dropped entirely.
    """
    def _rename(name):
        # Local copy of this module's key-renaming logic; the sibling helper
        # cannot be called by name because every function here shares one
        # (garbled) identifier.
        if "encoder.model" in name:
            name = name.replace('encoder.model' , 'encoder' )
        if "decoder.model" in name:
            name = name.replace('decoder.model' , 'decoder' )
        if "patch_embed.proj" in name:
            name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
        if "patch_embed.norm" in name:
            name = name.replace('patch_embed.norm' , 'embeddings.norm' )
        if name.startswith('encoder' ):
            if "layers" in name:
                name = 'encoder.' + name
            if "attn.proj" in name:
                name = name.replace('attn.proj' , 'attention.output.dense' )
            if "attn" in name and "mask" not in name:
                name = name.replace('attn' , 'attention.self' )
            if "norm1" in name:
                name = name.replace('norm1' , 'layernorm_before' )
            if "norm2" in name:
                name = name.replace('norm2' , 'layernorm_after' )
            if "mlp.fc1" in name:
                name = name.replace('mlp.fc1' , 'intermediate.dense' )
            if "mlp.fc2" in name:
                name = name.replace('mlp.fc2' , 'output.dense' )
            if name == "encoder.norm.weight":
                name = 'encoder.layernorm.weight'
            if name == "encoder.norm.bias":
                name = 'encoder.layernorm.bias'
        return name

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            # Width of one projection; q, k and v are stacked along dim 0.
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = F"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[F"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[F"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[F"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[F"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[F"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[F"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[_rename(key )] = val
    return orig_state_dict
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=False ) -> Optional[int]:
    """Convert an original Donut checkpoint to the HF VisionEncoderDecoder
    format, verify it on a sample document, and optionally save/push it.

    NOTE(review): this function is unrunnable as written -- the three
    parameters share one name (a SyntaxError; upstream they are model_name,
    pytorch_dump_folder_path, push_to_hub), the helpers `get_configs` and
    `convert_state_dict` do not exist under those names in this module, and
    intermediate results (`model`, `original_model`, `dataset`, `image`,
    `tokenizer`, `image_processor`, `processor`, `pixel_values`,
    `task_prompt`, `question`, `decoder_input_ids`, outputs/embeddings)
    are assigned to throwaway names and then read as undefined identifiers.
    TODO repair against the upstream conversion script.
    """
    # Load the original Donut model in eval mode.
    SCREAMING_SNAKE_CASE_ : List[Any] = DonutModel.from_pretrained(lowerCamelCase_ ).eval()

    # load HuggingFace model
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = get_configs(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ : int = DonutSwinModel(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ : Any = MBartForCausalLM(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ : List[str] = VisionEncoderDecoderModel(encoder=lowerCamelCase_ , decoder=lowerCamelCase_ )
    model.eval()

    # Port the renamed/re-laid-out weights into the HF model.
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = original_model.state_dict()
    SCREAMING_SNAKE_CASE_ : Any = convert_state_dict(lowerCamelCase_ , lowerCamelCase_ )
    model.load_state_dict(lowerCamelCase_ )

    # verify results on scanned document
    SCREAMING_SNAKE_CASE_ : Dict = load_dataset('hf-internal-testing/example-documents' )
    SCREAMING_SNAKE_CASE_ : int = dataset['test'][0]['image'].convert('RGB' )

    SCREAMING_SNAKE_CASE_ : List[Any] = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase_ , from_slow=lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ : Optional[int] = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    SCREAMING_SNAKE_CASE_ : List[Any] = DonutProcessor(lowerCamelCase_ , lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ : Tuple = processor(lowerCamelCase_ , return_tensors='pt' ).pixel_values

    # Each fine-tuned checkpoint expects a different task prompt.
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        SCREAMING_SNAKE_CASE_ : List[Any] = 'When is the coffee break?'
        SCREAMING_SNAKE_CASE_ : str = task_prompt.replace('{user_input}' , lowerCamelCase_ )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = '<s_rvlcdip>'
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        SCREAMING_SNAKE_CASE_ : Any = '<s_cord>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        SCREAMING_SNAKE_CASE_ : int = 's_cord-v2>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        SCREAMING_SNAKE_CASE_ : str = '<s_zhtrainticket>'
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hello world'
    else:
        raise ValueError('Model name not supported' )
    SCREAMING_SNAKE_CASE_ : Optional[int] = original_model.decoder.tokenizer(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors='pt' )[
        'input_ids'
    ]

    # Compare patch embeddings between the original and converted encoders.
    SCREAMING_SNAKE_CASE_ : int = original_model.encoder.model.patch_embed(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = model.encoder.embeddings(lowerCamelCase_ )
    assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 )

    # verify encoder hidden states
    SCREAMING_SNAKE_CASE_ : Any = original_model.encoder(lowerCamelCase_ )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = model.encoder(lowerCamelCase_ ).last_hidden_state
    assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-2 )

    # verify decoder hidden states
    SCREAMING_SNAKE_CASE_ : Dict = original_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).logits
    SCREAMING_SNAKE_CASE_ : int = model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ ).logits
    assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 )
    print('Looks ok!' )

    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor to {pytorch_dump_folder_path}' )
        model.save_pretrained(lowerCamelCase_ )
        processor.save_pretrained(lowerCamelCase_ )

    if push_to_hub:
        model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
        processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
    # NOTE(review): this guard is internally inconsistent -- the parser is
    # assigned to `UpperCamelCase__` but configured through the undefined name
    # `parser`, the parsed args then rebind `UpperCamelCase__`, and the final
    # call targets `convert_donut_checkpoint` and `args`, neither of which
    # exists in this module (the converter above is bound to a garbled name).
    # TODO repair against the upstream conversion script.
    UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''naver-clova-ix/donut-base-finetuned-docvqa''',
        required=False,
        type=str,
        help='''Name of the original model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        required=False,
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
    )

    UpperCamelCase__ : Optional[int] = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 105 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
    """VQModel unit tests.

    NOTE(review): heavily garbled -- the two mixin bases are the undefined
    name `lowercase` (upstream: ModelTesterMixin, UNetTesterMixin, which are
    imported above), both class attributes share one name (only the second
    assignment survives; upstream these are model_class and
    main_input_name), and method bodies read undefined names (`batch_size`,
    `num_channels`, `sizes`, `image`, `init_dict`, `inputs_dict`, `model`,
    `loading_info`, `output`) because the real assignments land in throwaway
    `_UpperCAmelCase` locals. References to `__UpperCamelCase` inside methods
    are additionally name-mangled by the class. TODO repair upstream-style.
    """

    __SCREAMING_SNAKE_CASE : Dict = VQModel
    __SCREAMING_SNAKE_CASE : Optional[int] = """sample"""

    @property
    def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
        """Build a random 4x3xHxW pixel batch as the model's dummy input."""
        _UpperCAmelCase = 4
        _UpperCAmelCase = 3
        _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
        return {"sample": image}

    @property
    def UpperCAmelCase__ ( self : Tuple ):
        # Expected input shape (channels, height, width).
        return (3, 32, 32)

    @property
    def UpperCAmelCase__ ( self : str ):
        # Expected output shape (channels, height, width).
        return (3, 32, 32)

    def UpperCAmelCase__ ( self : Dict ):
        """Return (init kwargs, forward inputs) for the common model tests."""
        _UpperCAmelCase = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        _UpperCAmelCase = self.dummy_input
        return init_dict, inputs_dict

    def UpperCAmelCase__ ( self : Dict ):
        # Intentionally skipped common test.
        pass

    def UpperCAmelCase__ ( self : str ):
        # Intentionally skipped common test.
        pass

    def UpperCAmelCase__ ( self : List[str] ):
        """Loading the dummy checkpoint should report no missing keys and
        produce a non-None forward output."""
        _UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
        self.assertIsNotNone(__UpperCamelCase )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )

        model.to(__UpperCamelCase )
        _UpperCAmelCase = model(**self.dummy_input )

        assert image is not None, "Make sure output is not None"

    def UpperCAmelCase__ ( self : List[Any] ):
        """Seeded forward pass on the dummy checkpoint matches frozen values."""
        _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
        model.to(__UpperCamelCase ).eval()

        # Fix the RNG so the random input is reproducible.
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )

        _UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        _UpperCAmelCase = image.to(__UpperCamelCase )
        with torch.no_grad():
            _UpperCAmelCase = model(__UpperCamelCase ).sample

        _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        _UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )
| 684 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( _A , unittest.TestCase ):
    """Lxmert tokenizer tests.

    NOTE(review): garbled -- the mixin base `_A` is undefined (upstream:
    TokenizerTesterMixin, imported above), and method bodies read names that
    are undefined in their scope (`vocab_tokens`, `input_text`, `output_text`,
    `tokens`/`A` in methods without an `A` parameter) because the real
    assignments land in throwaway `lowercase_` locals; setUp also never sets
    `self.vocab_file` before later methods read it. TODO repair.
    """

    SCREAMING_SNAKE_CASE_ : str = LxmertTokenizer
    SCREAMING_SNAKE_CASE_ : Any = LxmertTokenizerFast
    SCREAMING_SNAKE_CASE_ : int = True
    SCREAMING_SNAKE_CASE_ : Any = True

    def A ( self : str ) -> List[Any]:
        """Write a small WordPiece vocabulary fixture to the temp directory."""
        super().setUp()

        lowercase_ : List[Any] = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowercase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    def A ( self : int , A : Dict ) -> List[Any]:
        """Return an (input, output) text pair for round-trip tests."""
        lowercase_ : List[Any] = '''UNwant\u00E9d,running'''
        lowercase_ : Any = '''unwanted, running'''
        return input_text, output_text

    def A ( self : List[Any] ) -> Dict:
        """WordPiece tokenization of the fixture sentence."""
        lowercase_ : Tuple = self.tokenizer_class(self.vocab_file )
        lowercase_ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 10, 8, 9] )

    def A ( self : str ) -> int:
        """Slow and fast tokenizers agree on tokenize and encode."""
        if not self.test_rust_tokenizer:
            return

        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[int] = self.get_rust_tokenizer()

        lowercase_ : Optional[Any] = '''I was born in 92000, and this is falsé.'''

        lowercase_ : List[str] = tokenizer.tokenize(A )
        lowercase_ : Optional[int] = rust_tokenizer.tokenize(A )
        self.assertListEqual(A , A )

        lowercase_ : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A )
        lowercase_ : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
        self.assertListEqual(A , A )

        lowercase_ : Optional[Any] = self.get_rust_tokenizer()
        lowercase_ : Dict = tokenizer.encode(A )
        lowercase_ : str = rust_tokenizer.encode(A )
        self.assertListEqual(A , A )
| 141 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( DiffusionPipeline ):
    """Unconditional latent-diffusion pipeline: denoise random latents with a
    UNet under a DDIM scheduler, then decode them through a VQ-VAE.

    Fixes over the original: the base class name was garbled
    (DiffusionPipeline is imported above), ``__init__`` and ``__call__``
    declared duplicate parameter names (a SyntaxError), and the ``eta`` value
    was assigned to a throwaway local instead of the scheduler kwargs.
    """

    def __init__( self , vqvae: VQModel , unet: UNetaDModel , scheduler: DDIMScheduler ):
        """Register the VQ-VAE, UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        """Generate ``batch_size`` images.

        Returns an ImagePipelineOutput (or a plain tuple when
        ``return_dict`` is False) holding PIL images when ``output_type``
        is "pil", else a numpy array in [0, 1].
        """
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps )

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta

        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )

            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample

        # Map from [-1, 1] model range to [0, 1] pixels, channels-last numpy.
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 141 | 1 |
'''simple docstring'''
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
# This module exists only for backward compatibility: importing from it warns
# (from version 0.22.0) and points users at diffusers.pipelines.pipeline_utils.
deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: submodule -> public names it provides.
_lowercase = {
    """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}

# Model classes are only registered when torch is installed.
# NOTE(review): the list below OVERWRITES the import-structure dict instead of
# adding a "modeling_pegasus_x" entry to it, and the _LazyModule call at the
# bottom references `_import_structure`, which is undefined in this module
# (the structure lives in `_lowercase`). TODO repair against the upstream
# __init__ template.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase = [
        """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PegasusXForConditionalGeneration""",
        """PegasusXModel""",
        """PegasusXPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    _lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356 | 0 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase_ ( ):
    """Regression check: Kruskal's MST on a fixed 9-node graph matches the
    known minimum spanning tree (compared order-insensitively).

    Fix: the original passed the undefined name ``UpperCamelCase_`` to
    ``kruskal`` and then compared that same undefined name against itself.
    """
    num_nodes = 9
    # Each entry is [u, v, weight].
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Edge order is implementation-defined, so compare as sorted lists.
    assert sorted(expected ) == sorted(result )
| 710 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase ( ConfigTester ):
    """Config tester: a MobileViT config must expose its size and attention
    attributes.

    Fixes over the original: the base class name was garbled (ConfigTester is
    imported above), and the hasattr probes targeted an undefined
    (name-mangled) identifier instead of the freshly built config.
    """

    def snake_case_ ( self : Any ) -> Any:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''neck_hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
class lowerCamelCase :
def __init__( self : str , __snake_case : List[str] , __snake_case : Optional[int]=13 , __snake_case : Any=32 , __snake_case : Union[str, Any]=2 , __snake_case : List[str]=3 , __snake_case : List[str]=640 , __snake_case : Any=4 , __snake_case : Union[str, Any]="silu" , __snake_case : Union[str, Any]=3 , __snake_case : str=32 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.1 , __snake_case : Any=0.1 , __snake_case : Union[str, Any]=0.02 , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Tuple=10 , __snake_case : List[Any]=None , ) -> int:
_a : int = parent
_a : Any = batch_size
_a : Dict = image_size
_a : int = patch_size
_a : List[Any] = num_channels
_a : Dict = last_hidden_size
_a : Tuple = num_attention_heads
_a : List[str] = hidden_act
_a : Optional[Any] = conv_kernel_size
_a : Optional[Any] = output_stride
_a : int = hidden_dropout_prob
_a : Dict = attention_probs_dropout_prob
_a : Optional[int] = classifier_dropout_prob
_a : List[str] = use_labels
_a : Union[str, Any] = is_training
_a : Optional[int] = num_labels
_a : Dict = initializer_range
_a : List[str] = scope
def snake_case_ ( self : Dict ) -> Union[str, Any]:
_a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = None
_a : List[Any] = None
if self.use_labels:
_a : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self : Union[str, Any] ) -> Tuple:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case_ ( self : Optional[Any] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any] ) -> Union[str, Any]:
_a : Optional[Any] = MobileViTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : int = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self : Dict , __snake_case : List[str] , __snake_case : int , __snake_case : int , __snake_case : Optional[Any] ) -> str:
_a : Any = self.num_labels
_a : List[Any] = MobileViTForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
_a : Tuple = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Dict , __snake_case : Dict ) -> Optional[int]:
_a : List[Any] = self.num_labels
_a : Dict = MobileViTForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
_a : List[str] = model(__snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_a : Any = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self : int ) -> str:
_a : List[str] = self.prepare_config_and_inputs()
_a , _a , _a , _a : int = config_and_inputs
_a : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase : Optional[int] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase : Optional[int] = (
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase : str = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Tuple = False
def snake_case_ ( self : List[Any] ) -> List[Any]:
_a : str = MobileViTModelTester(self )
_a : List[Any] = MobileViTConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def snake_case_ ( self : Any ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def snake_case_ ( self : List[str] ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def snake_case_ ( self : Any ) -> Union[str, Any]:
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def snake_case_ ( self : List[str] ) -> str:
pass
def snake_case_ ( self : List[str] ) -> Union[str, Any]:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(__snake_case )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def snake_case_ ( self : int ) -> Dict:
pass
def snake_case_ ( self : int ) -> Union[str, Any]:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : Tuple ) -> Optional[int]:
def check_hidden_states_output(__snake_case : Union[str, Any] , __snake_case : str , __snake_case : Union[str, Any] ):
_a : List[Any] = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
_a : Optional[int] = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 5
self.assertEqual(len(__snake_case ) , __snake_case )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_a : Optional[Any] = 2
for i in range(len(__snake_case ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : int = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def snake_case_ ( self : Tuple ) -> List[str]:
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
def snake_case_ ( self : Dict ) -> Union[str, Any]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__snake_case )
@slow
def snake_case_ ( self : Any ) -> str:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Dict = MobileViTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowerCamelCase_ ( ):
_a : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def snake_case_ ( self : List[str] ) -> Dict:
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def snake_case_ ( self : Tuple ) -> int:
_a : str = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(__snake_case )
_a : Tuple = self.default_image_processor
_a : Union[str, Any] = prepare_img()
_a : List[Any] = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
_a : Dict = model(**__snake_case )
# verify the logits
_a : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __snake_case )
_a : Dict = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1E-4 ) )
@slow
def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
_a : int = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_a : Union[str, Any] = model.to(__snake_case )
_a : Optional[int] = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_a : List[Any] = prepare_img()
_a : int = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
_a : Dict = model(**__snake_case )
_a : Union[str, Any] = outputs.logits
# verify the logits
_a : Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __snake_case )
_a : str = torch.tensor(
[
[[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
[[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
[[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
] , device=__snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1E-4 ) )
@slow
def snake_case_ ( self : Tuple ) -> Optional[int]:
_a : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_a : Tuple = model.to(__snake_case )
_a : Dict = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
_a : Tuple = prepare_img()
_a : Tuple = image_processor(images=__snake_case , return_tensors='''pt''' ).to(__snake_case )
# forward pass
with torch.no_grad():
_a : List[Any] = model(**__snake_case )
_a : List[str] = outputs.logits.detach().cpu()
_a : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(50, 60)] )
_a : int = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __snake_case )
_a : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=__snake_case )
_a : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __snake_case )
| 249 | 0 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    """Return the *lowercase_*-th ugly number.

    Ugly numbers are positive integers whose only prime factors are 2, 3
    and 5; the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
    For ``lowercase_ <= 1`` the first ugly number, 1, is returned.

    Fix: the obfuscated original bound every intermediate to ``A__`` while
    reading the never-defined names ``ugly_nums``/``ia``/``next_num``,
    which raised ``NameError``; the classic three-pointer merge is restored.
    """
    ugly_nums = [1]  # ugly_nums[0] is the first ugly number

    # One read index and one pending candidate per prime factor.
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, lowercase_):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # Advance every pointer that produced the chosen value (this also
        # de-duplicates numbers like 6 = 2*3 = 3*2).
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    # Fix: the module-level function is (obfuscated-)named
    # SCREAMING_SNAKE_CASE; the original `ugly_numbers(200)` reference
    # raised NameError at runtime.
    print(F'''{SCREAMING_SNAKE_CASE(200) = }''')
| 87 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
    """Return all permutations of the list *lowercase_* as tuples.

    Uses Heap's algorithm, which produces each permutation from the
    previous one by a single swap. Note that *lowercase_* is permuted in
    place while generating.

    Fix: the obfuscated original declared the inner helper as
    ``def generate(lowercase_, lowercase_)`` (duplicate parameter name — a
    SyntaxError), read the undefined names ``k``/``arr``, and replaced the
    two swap statements with double assignments to ``A__`` that never
    swapped anything.
    """
    if len(lowercase_) <= 1:
        return [tuple(lowercase_)]

    res = []

    def generate(k, arr):
        # Base case: a 1-prefix is fixed, record the current arrangement.
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            # Heap's rule: for even k swap the i-th element with the last,
            # for odd k always swap the first element with the last.
            if k % 2 == 0:
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(lowercase_), lowercase_)
    return res
if __name__ == "__main__":
    # Read a comma-separated list of integers and print all permutations.
    # Fix: the obfuscated original bound both inputs to `_lowerCamelCase`
    # and then read the undefined names `user_input`, `arr` and `heaps`.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(SCREAMING_SNAKE_CASE(arr))
| 87 | 1 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __magic_name__ ( xlm_checkpoint_path , pytorch_dump_folder_path ) -> Any:
    """Convert an original XLM checkpoint into the Transformers layout.

    Reads the raw checkpoint (its ``model``, ``params`` and
    ``dico_word2id`` entries), re-keys the transformer weights one level
    deeper under ``transformer.``, drops tensor-valued entries from the
    config, rewrites the BPE vocab with ``</w>``/``@@`` conventions, and
    writes the weights/config/vocab files into *pytorch_dump_folder_path*.

    Fix: the obfuscated original declared both parameters as ``lowercase``
    (duplicate parameter name — a SyntaxError), rebound the state-dict
    variable instead of inserting keyed entries, ran ``isinstance`` on the
    wrong variable, and printed the config path when saving the vocab.
    """
    # Load the raw XLM checkpoint on CPU.
    chkpt = torch.load(xlm_checkpoint_path, map_location="""cpu""")

    state_dict = chkpt["""model"""]

    # We have the base model one level deeper than the original XLM repository:
    # every weight except the prediction layer gets a "transformer." prefix.
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["""transformer.""" + k] = v

    config = chkpt["""params"""]
    # Keep only JSON-serializable hyper-parameters (drop tensors/arrays).
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["""dico_word2id"""]
    # BPE convention: tokens without a "@@" continuation marker get an
    # explicit "</w>" end-of-word marker, continuation tokens drop "@@".
    # Entries with i <= 13 are kept verbatim — presumably XLM's 14 special
    # tokens; confirm against the original dictionary layout.
    vocab = {s + """</w>""" if s.find("""@@""") == -1 and i > 13 else s.replace("""@@""", """"""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]

    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, """w""", encoding="""utf-8""") as f:
        f.write(json.dumps(config, indent=2) + """\n""")

    print(f"""Save vocab file to {pytorch_vocab_dump_path}""")
    with open(pytorch_vocab_dump_path, """w""", encoding="""utf-8""") as f:
        f.write(json.dumps(vocab, indent=2) + """\n""")
if __name__ == "__main__":
    # Fix: the obfuscated original bound the parser and the parsed args to
    # the same name `UpperCAmelCase_` while reading the undefined name
    # `parser`, and called the non-existent
    # `convert_xlm_checkpoint_to_pytorch` instead of `__magic_name__`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    __magic_name__(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)

# Map of canonical T5 checkpoint names to their hosted config files.
# NOTE(review): this rebinds `UpperCAmelCase_`, discarding the logger bound
# just above — the obfuscation collapsed two distinct module-level names
# into one; restore separate names against the upstream module.
UpperCAmelCase_ = {
    """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
    """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
    """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
    """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
    """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCamelCase__ ):
    """Configuration class for T5 encoder-decoder models.

    Stores model hyper-parameters (vocab size, hidden sizes, layer counts,
    relative-attention settings, dropout, activation) and derives the dense
    activation (`dense_act_fn`, `is_gated_act`) from ``feed_forward_proj``.

    Fix: the obfuscated ``__init__`` declared every parameter as
    ``snake_case__`` (duplicate parameter names — a SyntaxError) and bound
    each value to the local ``lowercase_`` instead of ``self``; parameter
    names are restored from the names the body reads.
    """

    __a : Tuple = """t5"""
    __a : Optional[Any] = ["""past_key_values"""]
    __a : Optional[int] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1E-6,
        initializer_factor=1.0,
        feed_forward_proj="""relu""",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ) -> None:
        """Build the configuration; unknown kwargs go to the base config."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Decoder depth defaults to the encoder depth (symmetric model).
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # "gated-gelu" -> dense_act_fn="gelu", is_gated_act=True;
        # "relu" -> dense_act_fn="relu", is_gated_act=False.
        act_info = self.feed_forward_proj.split("""-""")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == """gated"""

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""")

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = """gelu_new"""

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
class UpperCamelCase__ ( lowerCamelCase__ ):
    """ONNX export configuration for T5 (seq2seq, optional past key values).

    NOTE(review): identifiers are machine-mangled — both properties below
    share the name `snake_case__`, so the second definition shadows the
    first, and the first property's body returns the never-bound name
    `common_inputs`. Restore the upstream property names before use.
    """

    @property
    def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes mapping for the exported model's inputs."""
        lowercase_ : str = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            # With cached past key values the decoder consumes one new token
            # per step, so its sequence axes are named accordingly.
            lowercase_ : Any = """past_encoder_sequence + sequence"""
            lowercase_ : Tuple = {0: """batch"""}
            lowercase_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            lowercase_ : int = {0: """batch""", 1: """decoder_sequence"""}
            lowercase_ : List[str] = {0: """batch""", 1: """decoder_sequence"""}

        if self.use_past:
            self.fill_with_past_key_values_(snake_case__, direction="""inputs""" )

        return common_inputs

    @property
    def snake_case__ ( self ) -> int:
        """Minimum ONNX opset required for this export."""
        return 13
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic CUDA/cuDNN kernels so the pixel-level output checks
# in the tests below are reproducible across runs.
enable_full_determinism()
class _snake_case ( a_ , unittest.TestCase ):
    # Fast (CPU-sized) tests for ConsistencyModelPipeline using tiny UNets.
    # NOTE(review): identifiers here look machine-mangled — every method is
    # named `_SCREAMING_SNAKE_CASE` (later defs shadow earlier ones), bodies
    # read names (`unet`, `scheduler`, `components`, `inputs`, `pipe`,
    # `image`, ...) that the mangled assignments never bind, and some
    # signatures repeat a parameter name, which is a SyntaxError. Restore
    # the upstream diffusers test names before relying on this class.
    SCREAMING_SNAKE_CASE : Optional[int] = ConsistencyModelPipeline
    SCREAMING_SNAKE_CASE : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    SCREAMING_SNAKE_CASE : Tuple = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    SCREAMING_SNAKE_CASE : Any = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''output_type''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ] )

    @property
    def _SCREAMING_SNAKE_CASE ( self ):
        """Tiny unconditional UNet checkpoint used by the fast tests."""
        lowerCAmelCase = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet

    @property
    def _SCREAMING_SNAKE_CASE ( self ):
        """Tiny class-conditional UNet checkpoint used by the fast tests."""
        lowerCAmelCase = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet

    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=False ):
        """Build pipeline components: tiny UNet + CM multistep scheduler."""
        if class_cond:
            lowerCAmelCase = self.dummy_cond_unet
        else:
            lowerCAmelCase = self.dummy_uncond_unet

        # Default to CM multistep sampler
        lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )

        lowerCAmelCase = {
            'unet': unet,
            'scheduler': scheduler,
        }

        return components

    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
        """Deterministic call kwargs: seeded generator, two timesteps."""
        if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
            lowerCAmelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )
        else:
            lowerCAmelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }

        return inputs

    def _SCREAMING_SNAKE_CASE ( self ):
        """Multistep sampling: check output shape and a reference slice."""
        lowerCAmelCase = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase = self.get_dummy_components()
        lowerCAmelCase = ConsistencyModelPipeline(**_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
        assert image.shape == (1, 32, 32, 3)

        lowerCAmelCase = image[0, -3:, -3:, -1]
        lowerCAmelCase = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _SCREAMING_SNAKE_CASE ( self ):
        """Multistep sampling with class conditioning (label 0)."""
        lowerCAmelCase = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase = self.get_dummy_components(class_cond=_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = ConsistencyModelPipeline(**_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = 0
        lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
        assert image.shape == (1, 32, 32, 3)

        lowerCAmelCase = image[0, -3:, -3:, -1]
        lowerCAmelCase = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _SCREAMING_SNAKE_CASE ( self ):
        """Onestep sampling (num_inference_steps=1, no explicit timesteps)."""
        lowerCAmelCase = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase = self.get_dummy_components()
        lowerCAmelCase = ConsistencyModelPipeline(**_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = 1
        lowerCAmelCase = None
        lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
        assert image.shape == (1, 32, 32, 3)

        lowerCAmelCase = image[0, -3:, -3:, -1]
        lowerCAmelCase = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _SCREAMING_SNAKE_CASE ( self ):
        """Onestep sampling with class conditioning (label 0)."""
        lowerCAmelCase = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase = self.get_dummy_components(class_cond=_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = ConsistencyModelPipeline(**_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = 1
        lowerCAmelCase = None
        lowerCAmelCase = 0
        lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
        assert image.shape == (1, 32, 32, 3)

        lowerCAmelCase = image[0, -3:, -3:, -1]
        lowerCAmelCase = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    # Slow GPU tests against the full `diffusers/consistency_models`
    # ImageNet-64 checkpoint, including fp16 / torch-2.0 SDP attention runs.
    # NOTE(review): identifiers are machine-mangled — every method is named
    # `_SCREAMING_SNAKE_CASE`, bodies read names the assignments never bind,
    # and several signatures repeat `_SCREAMING_SNAKE_CASE` as a parameter
    # name (a SyntaxError). Restore the upstream diffusers names before use.
    def _SCREAMING_SNAKE_CASE ( self ):
        """Release GPU memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=(1, 3, 64, 64) ):
        """Seeded call kwargs; optionally pins fixed latents (fp16 runs)."""
        lowerCAmelCase = torch.manual_seed(_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }

        if get_fixed_latents:
            lowerCAmelCase = self.get_fixed_latents(seed=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , shape=_SCREAMING_SNAKE_CASE )
            lowerCAmelCase = latents

        return inputs

    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=(1, 3, 64, 64) ):
        """Deterministic latent tensor for a given seed/device/dtype/shape."""
        if type(_SCREAMING_SNAKE_CASE ) == str:
            lowerCAmelCase = torch.device(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
        return latents

    def _SCREAMING_SNAKE_CASE ( self ):
        """Multistep sampling on the full ImageNet-64 checkpoint."""
        lowerCAmelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        lowerCAmelCase = ConsistencyModelPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
        pipe.to(torch_device=_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.get_inputs()

        lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
        assert image.shape == (1, 64, 64, 3)

        lowerCAmelCase = image[0, -3:, -3:, -1]

        lowerCAmelCase = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def _SCREAMING_SNAKE_CASE ( self ):
        """Onestep sampling on the full ImageNet-64 checkpoint."""
        lowerCAmelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        lowerCAmelCase = ConsistencyModelPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
        pipe.to(torch_device=_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.get_inputs()
        lowerCAmelCase = 1
        lowerCAmelCase = None

        lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
        assert image.shape == (1, 64, 64, 3)

        lowerCAmelCase = image[0, -3:, -3:, -1]

        lowerCAmelCase = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def _SCREAMING_SNAKE_CASE ( self ):
        """Multistep fp16 sampling with fixed latents under torch-2.0 SDP
        flash attention."""
        lowerCAmelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        lowerCAmelCase = ConsistencyModelPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
        pipe.to(torch_device=_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.get_inputs(get_fixed_latents=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=_SCREAMING_SNAKE_CASE , enable_math=_SCREAMING_SNAKE_CASE , enable_mem_efficient=_SCREAMING_SNAKE_CASE ):
            lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
        assert image.shape == (1, 64, 64, 3)

        lowerCAmelCase = image[0, -3:, -3:, -1]

        lowerCAmelCase = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def _SCREAMING_SNAKE_CASE ( self ):
        """Onestep fp16 sampling with fixed latents under torch-2.0 SDP
        flash attention."""
        lowerCAmelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        lowerCAmelCase = ConsistencyModelPipeline(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
        pipe.to(torch_device=_SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )

        lowerCAmelCase = self.get_inputs(get_fixed_latents=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
        lowerCAmelCase = 1
        lowerCAmelCase = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=_SCREAMING_SNAKE_CASE , enable_math=_SCREAMING_SNAKE_CASE , enable_mem_efficient=_SCREAMING_SNAKE_CASE ):
            lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
        assert image.shape == (1, 64, 64, 3)

        lowerCAmelCase = image[0, -3:, -3:, -1]

        lowerCAmelCase = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 284 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _snake_case ( a_ ):
    # Output container for the text-to-video pipelines re-exported below;
    # the field holds the generated frames either as a list of numpy arrays
    # or as a torch tensor.
    # NOTE(review): the base `a_` and the field name are obfuscated —
    # presumably `BaseOutput` (imported above) and `frames`; confirm
    # against the upstream diffusers module.
    SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
# Optional-dependency guard: only re-export the real pipelines when both
# transformers and torch are installed; otherwise fall back to the dummy
# placeholder objects that raise a helpful error on use.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 284 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
# Make the shared test utilities (custom dummy config / feature-extractor
# modules) importable for the dynamic-module tests below.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

# Fixture paths used by the tests below.
# NOTE(review): all three constants are bound to the same mangled name
# `A__`, so only the last assignment survives — upstream these were three
# distinct names (fixtures dir, extractor-config json, dummy-config json).
A__ = get_tests_dir("fixtures")
A__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
A__ = get_tests_dir("fixtures/dummy-config.json")
class __UpperCamelCase ( unittest.TestCase ):
    def _SCREAMING_SNAKE_CASE ( self: Optional[int] ):
        """Per-test setup.

        NOTE(review): the assignment below binds a local that is never
        read — the obfuscation likely destroyed the original statement;
        check the upstream transformers test.
        """
        __magic_name__ = 0
    def _SCREAMING_SNAKE_CASE ( self: Tuple ):
        """Load a feature extractor from a Hub model identifier.

        NOTE(review): the `assertIsInstance` arguments are mangled to the
        enclosing class name; upstream they were the loaded extractor and
        `Wav2Vec2FeatureExtractor`.
        """
        __magic_name__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
    def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
        """Load a feature extractor from a local fixture path.

        NOTE(review): `__UpperCamelCase` here resolves to the enclosing
        test class, not the fixture path constant the upstream test used —
        the obfuscation collapsed the names.
        """
        __magic_name__ = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
    def _SCREAMING_SNAKE_CASE ( self: Tuple ):
        """Round-trip test: save a model config plus feature extractor into
        a temp dir, reload via Auto class, and verify that the private
        `_processor_class` field is not serialized into the JSON.
        """
        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ = WavaVecaConfig()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            __magic_name__ = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ).to_dict()

            config_dict.pop('feature_extractor_type' )
            __magic_name__ = WavaVecaFeatureExtractor(**__UpperCamelCase )

            # save in new folder
            model_config.save_pretrained(__UpperCamelCase )
            config.save_pretrained(__UpperCamelCase )

            __magic_name__ = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )

            # make sure private variable is not incorrectly saved
            __magic_name__ = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )

        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
__magic_name__ = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('bert-base' )
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(__UpperCamelCase , revision='aaaaaa' )
def _SCREAMING_SNAKE_CASE ( self: int ):
'''simple docstring'''
with self.assertRaisesRegex(
__UpperCamelCase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCamelCase )
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCamelCase )
__magic_name__ = AutoFeatureExtractor.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
try:
AutoConfig.register('custom' , __UpperCamelCase )
AutoFeatureExtractor.register(__UpperCamelCase , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoFeatureExtractor.register(__UpperCamelCase , __UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
__magic_name__ = CustomFeatureExtractor.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__UpperCamelCase )
__magic_name__ = AutoFeatureExtractor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
class __UpperCamelCase ( SCREAMING_SNAKE_CASE ):
_lowercase : str = True
try:
AutoConfig.register('custom' , __UpperCamelCase )
AutoFeatureExtractor.register(__UpperCamelCase , __UpperCamelCase )
# If remote code is not set, the default is to use local
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=__UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(__UpperCamelCase , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 184 |
import os

# Value of each single Roman-numeral symbol, shared by the helpers below.
# (The mangled original bound this dict to ``A__`` while the code read ``SYMBOLS``.)
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman-numeral string (e.g. ``"XIV"``) to its integer value.

    Applies the subtractive rule: a symbol strictly smaller than its right
    neighbour is subtracted (``IV`` -> 4), otherwise added.  The final symbol
    is always added.  Assumes *numerals* is non-empty and well-formed.
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # Last symbol has no right neighbour: always additive.
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Return the minimal Roman-numeral representation of *num*.

    Handles 0 <= num <= 3999 (0 maps to the empty string).  Each decimal digit
    is emitted with the standard subtractive forms (CM, CD, XC, XL, IX, IV).
    """
    numerals = ""
    # Thousands: plain repetition of 'M'.
    m_count = num // 1_0_0_0
    numerals += m_count * "M"
    num %= 1_0_0_0
    # Hundreds: CM/CD subtractive forms, then D and repeated C.
    c_count = num // 1_0_0
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_0_0
    # Tens: XC/XL subtractive forms, then L and repeated X.
    x_count = num // 1_0
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 1_0
    # Units: IX/IV subtractive forms, then V and repeated I.
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting every Roman numeral
    from the data file in its minimal form.

    Reads one numeral per line from the file located next to this script,
    re-encodes it minimally, and sums the per-line length reductions.
    """
    savings = 0
    # Data file lives alongside this script (the mangled original computed the
    # directory from the filename argument instead of __file__).
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        shortened = generate_roman_numerals(value)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 184 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class snake_case__ ( BaseOutput ):
    """Output of the IF pipelines.

    Field names restored from the DeepFloyd-IF pipeline output; the mangled
    original bound all three fields to one name (collapsing the dataclass to a
    single field) and derived from the undefined name ``snake_case_`` instead of
    the imported ``BaseOutput``.

    Fields:
        images: generated images, as PIL images or a numpy array.
        nsfw_detected: optional per-image flags from the safety checker.
        watermark_detected: optional per-image watermark flags.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
    # Guard: the real IF pipelines need both transformers and torch at import time.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Optional deps missing: expose placeholder objects that raise on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    # Optional deps present: export the real pipeline implementations.
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 528 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE__:Dict = logging.get_logger(__name__)
class snake_case__ ( DonutImageProcessor ):
    """Deprecated alias of :class:`DonutImageProcessor`.

    Kept for backward compatibility only: construction emits a ``FutureWarning``
    and then defers entirely to the image-processor base class.  (The mangled
    original derived from the undefined name ``snake_case_``, repeated one
    parameter name for ``*args``/``**kwargs`` — a SyntaxError — and passed that
    broken name where the warning category belongs.)
    """

    def __init__( self , *args , **kwargs ):
        # Deprecation notice: this class is scheduled for removal in v5.
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 528 | 1 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase):
    """Tokenization tests for LayoutLM.

    Attribute/method names are restored to the conventional
    ``TokenizerTesterMixin`` hooks: the mangled original bound every attribute
    and every method to a single identifier, so only the last binding survived
    and the earlier hooks (including ``setUp``) could never run, and the values
    written in set-up (``self.vocab_file``) were lost to throwaway names.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a tiny WordPiece vocab into the mixin's temp dir."""
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        # test_full_tokenizer below reads self.vocab_file back.
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

    def get_tokenizer(self, **kwargs):
        """Hook used by the common tests to build the slow tokenizer."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self, tokenizer):
        """Raw input text and its expected decoded form for round-trip tests."""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        """Lower-casing, accent stripping and WordPiece splitting end to end."""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )

    def test_special_tokens_as_you_expect(self):
        """Intentional no-op placeholder kept for parity with other tokenizer tests."""
        pass
| 679 | import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read *file_path* as bytes and return its contents as a bit string.

    Each byte contributes exactly eight '0'/'1' characters, most significant
    bit first.  Prints a message and exits the process if the file cannot be
    opened (kept from the original CLI-oriented design).
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            # Zero-padded 8-bit binary rendering of one byte.
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(
    lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
    """Grow the LZ lexicon in place after *curr_string* has been matched.

    Replaces the matched entry with its two one-bit extensions and, whenever
    *index* reaches a power of two (the code length grows by one bit),
    left-pads every existing code with '0'.  The mangled original assigned all
    three mutations to throwaway locals, so the lexicon was never updated.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        # Code width grew: keep all existing codes the same length.
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress a bit string with the LZ-style scheme used by this module.

    Consumes *data_bits* one bit at a time; each time the accumulated string
    matches a lexicon entry, the entry's code is emitted and the lexicon is
    extended via :func:`add_key_to_lexicon`.  A trailing partial match is
    zero-extended until it hits the lexicon, then emitted.  The mangled
    original lost the assignment targets (lexicon, result, curr_string, index),
    so nothing was ever accumulated; restored here.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    # Flush a trailing partial match by zero-extension.
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the original file length (in bytes) to the compressed bit string.

    The length is written in binary, preceded by ``len-1`` '0' bits so a
    decoder can recover how many digits the length field occupies
    (Elias-gamma-style framing).
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the bit string *to_write* to *file_path* as raw bytes.

    The stream is split into 8-bit chunks; the final chunk is terminated with a
    '1' marker followed by '0' padding (a whole ``10000000`` byte if the data
    already ends on a byte boundary) so a decoder can strip the padding.
    Prints a message and exits the process if the file cannot be opened.

    NOTE: assumes *to_write* is non-empty, as in the original.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                # Data ends exactly on a byte boundary: add a full marker byte.
                result_byte_array.append("10000000")
            else:
                # Terminate the partial byte with '1' then zero-pad to 8 bits.
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Compress *source_path* into *destination_path* using the helpers above.

    Pipeline: read bits -> LZ-compress -> prepend original length -> write
    padded bytes.  (The mangled original defined every helper under one name,
    so these calls could never resolve; names restored throughout the module.)
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 679 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (src, tgt) pairs into examples of at most
    *max_tokens* tokens per side, as measured by tokenizer *tok*.

    Returns two parallel lists (packed sources, packed targets).  A single
    example longer than *max_tokens* is emitted on its own rather than dropped.
    (The mangled original repeated one name for all four parameters — a
    SyntaxError — and never matched the ``pack_examples`` call site.)
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        # One tokenizer call per candidate; shape[1] is the sequence length.
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup: flush the example still being built
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    """Pack the train split of *data_dir* with :func:`pack_examples` and copy
    the val/test splits through unchanged into *save_path*.

    (The mangled original repeated one name for all four parameters — a
    SyntaxError — and passed undefined names for ``exist_ok``; restored.)
    """
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    # Evaluation splits are used unpacked: copy them alongside the packed train.
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    """CLI entry point: parse arguments, load the tokenizer, pack the dataset.

    (Restores the ``packer_cli`` name used by the ``__main__`` guard, which the
    mangled original could never resolve, and the argparse ``type=`` values that
    were replaced with an undefined name.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=1_28)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
    """Tokenization tests for GPTSAN-japanese.

    NOTE(review): identifier mangling has damaged this class — the base name
    ``UpperCAmelCase__`` is undefined here (the import block provides
    ``TokenizerTesterMixin``, the usual mixin; confirm), the three class
    attributes all share the name ``SCREAMING_SNAKE_CASE_`` (later bindings
    overwrite earlier ones), every method is named ``UpperCamelCase`` (only the
    last definition survives), and results are bound to the throwaway name
    ``lowerCamelCase_`` but read back under other names (``self.vocab_file``,
    ``tokenizer``, ``x_token`` …).  Statements are preserved verbatim; restore
    the intended names before relying on these tests.
    """

    SCREAMING_SNAKE_CASE_ = GPTSanJapaneseTokenizer
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = {'do_clean_text': False, 'add_prefix_space': False}

    def UpperCamelCase( self ) -> Optional[Any]:
        """Write a tiny vocab and emoji table into a temp dir for local loading."""
        super().setUp()
        # fmt: off
        lowerCamelCase_ = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        lowerCamelCase_ = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        lowerCamelCase_ = {'unk_token': '<unk>'}
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.emoji_file , 'w' ) as emoji_writer:
            emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )

    def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Dict:
        """Build a tokenizer from the temp dir written in set-up."""
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        """Return an (input, expected-decoded) text pair."""
        lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        lowerCamelCase_ = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> int:
        """Encode/decode round trip used by the common tests."""
        lowerCamelCase_ ,lowerCamelCase_ = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
        return text, ids

    def UpperCamelCase( self ) -> Tuple:
        """No-op; common test left unimplemented."""
        pass  # TODO add if relevant

    def UpperCamelCase( self ) -> Optional[int]:
        """No-op; common test left unimplemented."""
        pass  # TODO add if relevant

    def UpperCamelCase( self ) -> List[Any]:
        """No-op; common test left unimplemented."""
        pass  # TODO add if relevant

    def UpperCamelCase( self ) -> int:
        """Tokenize a mixed Japanese sentence and check token/id round trips."""
        lowerCamelCase_ = self.get_tokenizer()
        # Testing tokenization
        lowerCamelCase_ = 'こんにちは、世界。 こんばんは、㔺界。'
        lowerCamelCase_ = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        lowerCamelCase_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Testing conversion to ids without special tokens
        lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Testing conversion to ids with special tokens
        lowerCamelCase_ = tokens + [tokenizer.unk_token]
        lowerCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> Dict:
        """Encode/decode through the <|bagoftoken|> expansion."""
        lowerCamelCase_ = self.get_tokenizer()
        # Testing tokenization
        lowerCamelCase_ = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        lowerCamelCase_ = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    @slow
    def UpperCamelCase( self ) -> Union[str, Any]:
        """Prefix text and input text should decode identically however they are split."""
        lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        lowerCamelCase_ = 'こんにちは、世界。'
        lowerCamelCase_ = 'こんばんは、㔺界。😀'
        lowerCamelCase_ = 'こんにちは、世界。こんばんは、世界。😀'
        lowerCamelCase_ = tokenizer.encode(prefix_text + input_text )
        lowerCamelCase_ = tokenizer.encode('' , prefix_text=prefix_text + input_text )
        lowerCamelCase_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    @slow
    def UpperCamelCase( self ) -> Optional[int]:
        """token_type_ids should mark the prefix segment consistently."""
        lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        # Testing tokenization
        lowerCamelCase_ = 'こんにちは、世界。'
        lowerCamelCase_ = 'こんばんは、㔺界。😀'
        lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
        lowerCamelCase_ = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2
        lowerCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
        lowerCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
        lowerCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        lowerCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
        lowerCamelCase_ = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
        lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    @slow
    def UpperCamelCase( self ) -> Dict:
        """Encoding with an explicit prefix should position the SEG token as expected."""
        lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        lowerCamelCase_ = tokenizer.encode('あンいワ' )
        lowerCamelCase_ = tokenizer.encode('' , prefix_text='あンいワ' )
        lowerCamelCase_ = tokenizer.encode('いワ' , prefix_text='あン' )
        self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
        self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) )
        self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token

    @slow
    def UpperCamelCase( self ) -> List[str]:
        """Batch encoding pads ids / token_type_ids / attention_mask consistently."""
        lowerCamelCase_ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
        lowerCamelCase_ = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
        # fmt: off
        lowerCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        lowerCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        lowerCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self ) -> Any:
        """No-op; common test left unimplemented."""
        pass

    def UpperCamelCase( self ) -> List[Any]:
        """No-op; common test left unimplemented."""
        pass
| 42 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( a , unittest.TestCase ):
    """Fast tests for ``KandinskyImgaImgPipeline`` built from tiny dummy models.

    NOTE(review): name mangling has damaged this class — the base name ``a`` is
    undefined in this module (the imports provide ``PipelineTesterMixin``, the
    mixin normally used here; confirm), every class attribute is bound to the
    same name ``_UpperCamelCase`` (later bindings overwrite earlier ones), every
    method/property is named ``snake_case`` (only the last definition survives),
    one method signature repeats a parameter name (a SyntaxError), and results
    are bound to the throwaway name ``A`` but read back under other names.
    Statements are preserved verbatim.
    """

    _UpperCamelCase = KandinskyImgaImgPipeline
    _UpperCamelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    _UpperCamelCase = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    _UpperCamelCase = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    _UpperCamelCase = False

    @property
    def snake_case ( self ):
        return 32

    @property
    def snake_case ( self ):
        return 32

    @property
    def snake_case ( self ):
        return self.time_input_dim

    @property
    def snake_case ( self ):
        return self.time_input_dim * 4

    @property
    def snake_case ( self ):
        return 100

    @property
    def snake_case ( self ):
        # Tiny multilingual-CLIP tokenizer for the dummy text encoder.
        A : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
        return tokenizer

    @property
    def snake_case ( self ):
        # Seed before construction so dummy weights are reproducible.
        torch.manual_seed(0 )
        A : Dict = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
        A : int = MultilingualCLIP(UpperCamelCase__ )
        A : List[Any] = text_encoder.eval()
        return text_encoder

    @property
    def snake_case ( self ):
        torch.manual_seed(0 )
        A : Union[str, Any] = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        A : str = UNetaDConditionModel(**UpperCamelCase__ )
        return model

    @property
    def snake_case ( self ):
        # Keyword arguments for the tiny VQ model below.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def snake_case ( self ):
        torch.manual_seed(0 )
        A : List[str] = VQModel(**self.dummy_movq_kwargs )
        return model

    def snake_case ( self ):
        """Assemble the full set of dummy pipeline components."""
        A : Tuple = self.dummy_text_encoder
        A : Union[str, Any] = self.dummy_tokenizer
        A : Dict = self.dummy_unet
        A : str = self.dummy_movq
        A : Optional[Any] = {
            '''num_train_timesteps''': 1_000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        A : Optional[int] = DDIMScheduler(**UpperCamelCase__ )
        A : Tuple = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
        """Build deterministic dummy call inputs.

        NOTE(review): the signature repeats ``_UpperCAmelCase`` (a SyntaxError)
        and the body reads ``seed`` — presumably the second parameter.
        """
        A : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase__ )
        # create init_image
        A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A : Dict = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((256, 256) )
        if str(UpperCamelCase__ ).startswith('''mps''' ):
            A : Dict = torch.manual_seed(UpperCamelCase__ )
        else:
            A : int = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        A : List[str] = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def snake_case ( self ):
        """Run the dummy pipeline on CPU and compare a 3x3 output slice."""
        A : Any = '''cpu'''
        A : Tuple = self.get_dummy_components()
        A : Dict = self.pipeline_class(**UpperCamelCase__ )
        A : Optional[int] = pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A : List[str] = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
        A : Optional[int] = output.images
        A : Any = pipe(
            **self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
        A : str = image[0, -3:, -3:, -1]
        A : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A : List[str] = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    """Slow GPU integration test: a full Kandinsky img2img run against a reference image.

    NOTE(review): this class reuses the module-level name ``_lowercase`` and so
    shadows the fast-test class above; both methods are named ``snake_case``
    (the first calls ``super().tearDown()`` but will not be invoked
    automatically under that name), and ``A, A : Optional[int] = ...`` is an
    invalid annotated tuple assignment.  Statements are preserved verbatim.
    """

    def snake_case ( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def snake_case ( self ):
        """Run prior + img2img pipelines and compare to a stored reference output."""
        A : int = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_img2img_frog.npy''' )
        A : str = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        A : List[Any] = '''A red cartoon frog, 4k'''
        A : Optional[int] = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(UpperCamelCase__ )
        A : Dict = KandinskyImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
        A : Dict = pipeline.to(UpperCamelCase__ )
        pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
        A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
        A, A : Optional[int] = pipe_prior(
            UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        A : Optional[int] = pipeline(
            UpperCamelCase__ , image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
        A : List[Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 700 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    """Derive the class label from an Oxford-Pets-style file name.

    ``"path/to/great_dane_101.jpg" -> "great_dane"`` — everything of the base
    name before the final ``_<number>.jpg`` suffix.  (The mangled original
    named this def ``_lowerCamelCase`` while the call sites use
    ``extract_label``; restored.)
    """
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class _lowercase ( Dataset ):
    """Map-style dataset over pet images.

    ``__getitem__`` yields ``{"image": <transformed image>, "label": <label>}``
    where the label is the filename-derived string, mapped through
    ``label_to_id`` when one is supplied.  (The mangled original derived from
    the undefined name ``a`` instead of the imported ``Dataset``, repeated one
    name for all three ``__init__`` parameters — a SyntaxError — and read
    ``idx`` in ``__getitem__`` although the parameter had a different name.)
    """

    def __init__( self , file_names , image_transform=None , label_to_id=None ):
        # file_names: list of image paths; image_transform: optional callable;
        # label_to_id: optional mapping from label string to integer id.
        A : str = file_names
        A : Optional[int] = image_transform
        A : str = label_to_id
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__( self ):
        return len(self.file_names )

    def __getitem__( self , idx ):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('''RGB''' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            # Translate the string label into its integer class id.
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def _lowerCamelCase( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ) -> Any:
# Initialize accelerator
if args.with_tracking:
A : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
A : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A : List[str] = config['''lr''']
A : int = int(config['''num_epochs'''] )
A : List[str] = int(config['''seed'''] )
A : Any = int(config['''batch_size'''] )
A : List[str] = config['''image_size''']
if not isinstance(UpperCamelCase__ , (list, tuple) ):
A : List[str] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
A : List[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A : Optional[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A : Optional[Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A : Any = os.path.split(UpperCamelCase__ )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Grab all the image filenames
A : int = [os.path.join(args.data_dir , UpperCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
A : int = [extract_label(UpperCamelCase__ ) for fname in file_names]
A : str = list(set(UpperCamelCase__ ) )
id_to_label.sort()
A : Dict = {lbl: i for i, lbl in enumerate(UpperCamelCase__ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase__ )
torch.manual_seed(UpperCamelCase__ )
torch.cuda.manual_seed_all(UpperCamelCase__ )
# Split our filenames between train and validation
A : Dict = np.random.permutation(len(UpperCamelCase__ ) )
A : str = int(0.8 * len(UpperCamelCase__ ) )
A : Tuple = random_perm[:cut]
A : List[Any] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A : Any = Compose([RandomResizedCrop(UpperCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] )
A : List[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# For evaluation, we use a deterministic Resize
A : Optional[Any] = Compose([Resize(UpperCamelCase__ ), ToTensor()] )
A : List[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# Instantiate dataloaders.
A : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
A : Tuple = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A : Union[str, Any] = create_model('''resnet50d''' , pretrained=UpperCamelCase__ , num_classes=len(UpperCamelCase__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A : Optional[int] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A : Union[str, Any] = False
for param in model.get_classifier().parameters():
A : Any = True
# We normalize the batches of images to be a bit faster.
A : Dict = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
A : str = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
A : List[Any] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
A : int = OneCycleLR(optimizer=UpperCamelCase__ , max_lr=UpperCamelCase__ , epochs=UpperCamelCase__ , steps_per_epoch=len(UpperCamelCase__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A, A, A, A, A : Optional[int] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
A : Dict = 0
# We also need to keep track of the starting epoch so files are named properly
A : str = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
A : Optional[int] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
A : Dict = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
A : Optional[int] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
A : Optional[int] = os.path.splitext(UpperCamelCase__ )[0]
if "epoch" in training_difference:
A : Tuple = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
A : Union[str, Any] = None
else:
A : int = int(training_difference.replace('''step_''' , '''''' ) )
A : str = resume_step // len(UpperCamelCase__ )
resume_step -= starting_epoch * len(UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
if args.with_tracking:
A : int = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
A : str = accelerator.skip_first_batches(UpperCamelCase__ , UpperCamelCase__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
A : int = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
A : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
A : Optional[int] = (batch['''image'''] - mean) / std
A : int = model(UpperCamelCase__ )
A : List[Any] = torch.nn.functional.cross_entropy(UpperCamelCase__ , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A : List[Any] = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
A : Dict = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
model.eval()
A : Optional[int] = 0
A : int = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
A : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
A : Any = (batch['''image'''] - mean) / std
with torch.no_grad():
A : Union[str, Any] = model(UpperCamelCase__ )
A : Tuple = outputs.argmax(dim=-1 )
A, A : List[Any] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
A : Any = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
A : str = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 100 * eval_metric,
'''train_loss''': total_loss.item() / len(UpperCamelCase__ ),
'''epoch''': epoch,
} , step=UpperCamelCase__ , )
if checkpointing_steps == "epoch":
A : Dict = F'''epoch_{epoch}'''
if args.output_dir is not None:
A : Dict = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
if args.with_tracking:
accelerator.end_training()
def _lowerCamelCase( ) -> int:
A : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=UpperCamelCase__ , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=UpperCamelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=UpperCamelCase__ , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
A : Tuple = parser.parse_args()
A : List[str] = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 224}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 537 | 0 |
import numpy as np
def SCREAMING_SNAKE_CASE ( vector , alpha ) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation, applied element-wise.

    Positive entries pass through unchanged; non-positive entries are mapped to
    ``alpha * (exp(x) - 1)``.

    Args:
        vector: input array (or anything NumPy can broadcast) of real values.
        alpha: scale factor applied to the negative branch.

    Returns:
        ``np.ndarray`` with the ELU activation applied element-wise.
    """
    # Original signature declared two parameters both named ``lowercase_`` (a
    # SyntaxError) while the body read ``vector``/``alpha``; reconstructed here.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 87 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _UpperCamelCase ( unittest.TestCase , _A ):
    """Integration tests for the ``text-classification`` tool.

    Exercises both the locally-loaded tool and its remote-endpoint variant,
    using positional and keyword calling conventions. The ``_A`` base is the
    shared tool-tester mixin imported at the top of this file.
    """

    def setUp( self ):
        # The original code assigned the loaded tools to throwaway locals and
        # the test methods then read `self.tool` / `self.remote_tool`, which
        # were never set; store them on the instance so the tests can run.
        # (All five methods also shared one mangled name, so only the last
        # definition survived — reconstructed with distinct test names.)
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification" , remote=True )

    def test_exact_match_arg( self ):
        """Positional call on the local tool returns the matching label."""
        result = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def test_exact_match_arg_remote( self ):
        """Positional call on the remote tool returns the matching label."""
        result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def test_exact_match_kwarg( self ):
        """Keyword call on the local tool returns the matching label."""
        result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )

    def test_exact_match_kwarg_remote( self ):
        """Keyword call on the remote tool returns the matching label."""
        result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
| 548 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (transformers' logging helper, imported above).
A_ : Tuple = logging.get_logger(__name__)

# Checkpoint-name -> config-URL map for published Open-Llama checkpoints.
# NOTE(review): this dict previously rebound ``A_`` and silently clobbered the
# logger above; renamed so both objects remain reachable.
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for Open-Llama models (``open-llama`` model type).

    Stores the hyper-parameters defining the architecture and inherits
    serialization / ``from_pretrained`` behaviour from ``PretrainedConfig``
    (imported at the top of this file; the original base name was a mangled
    placeholder).

    NOTE(review): in the original, every ``__init__`` parameter was named
    ``__lowerCAmelCase`` (a SyntaxError) and the body assigned to a bare local
    instead of ``self``; parameter and attribute names below are reconstructed
    from the values the body reads and the defaults in the signature.
    """

    # Identifier used by the AutoConfig machinery.
    model_type = '''open-llama'''

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg key ("memorry") is kept for backward
        # compatibility with configs saved under the old name.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation( self ):
        """Validate ``self.rope_scaling``.

        It must be ``None`` or a dict of exactly two fields:
        ``{"type": "linear" | "dynamic", "factor": float > 1}``.

        Raises:
            ValueError: if the dict shape, type field, or factor is invalid.
        """
        # (Renamed from the mangled single-letter name ``A`` so the call in
        # ``__init__`` resolves; message fixed to name the checked `type`
        # field instead of the non-existent `name` field.)
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 716 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module-level logger; the training entry point below logs through the name
# ``logger`` (e.g. `logger.warning(...)`), so expose the object under that
# alias as well — otherwise those calls raise NameError.
A_ : int = logging.getLogger(__name__)
logger = A_
@dataclass
class _lowercase :
    """Arguments for selecting the model/config/tokenizer to fine-tune.

    NOTE(review): in the original every field was bound to the same mangled
    attribute name ``_UpperCAmelCase`` (so only the last survived) and the
    defaults referenced an undefined placeholder; field names below are
    reconstructed from how the training function reads them
    (``model_args.model_name_or_path``, ``model_args.task_type``, ...).
    """

    # Required: path or hub identifier of the pretrained model.
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    # Optional separate config name/path (defaults to the model path).
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    # Which TokenClassificationTask subclass to use (looked up in tasks.py).
    task_type: Optional[str] = field(
        default='''NER''', metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
    # Optional separate tokenizer name/path (defaults to the model path).
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast: bool = field(default=False, metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class _lowercase :
    """Arguments for the input data used in training and evaluation.

    NOTE(review): in the original every field was bound to the same mangled
    attribute name ``_UpperCAmelCase`` and the optional defaults referenced an
    undefined placeholder; field names below are reconstructed from how the
    training function reads them (``data_args.data_dir``,
    ``data_args.max_seq_length``, ...).
    """

    # Required: directory holding CoNLL-2003-formatted .txt files.
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
    # Optional file listing all labels; CoNLL-2003 labels are used otherwise.
    labels: Optional[str] = field(
        default=None, metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''}, )
    max_seq_length: int = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase__ ( ):
    """Fine-tune and evaluate a token-classification model (e.g. NER/POS).

    Parses model / data / training arguments from the CLI (or from a single
    JSON-file argument), resolves the task class from ``tasks.py``, builds the
    datasets, model and Trainer, then runs train / eval / predict according to
    the training flags. Returns the dict of evaluation results.

    NOTE(review): several names this function relies on look garbled in this
    file — ``logger`` (only ``A_`` is defined above), the argument dataclasses
    (both declared as ``_lowercase`` rather than ``ModelArguments`` /
    ``DataTrainingArguments``), and many locals collapsed to a single mangled
    binding ``a`` (the multi-target unpackings ``a , a , a = ...`` discard all
    but the last value, and later code reads names such as ``parser`` /
    ``training_args`` that are never bound). The nested ``align_predictions``
    also declares two parameters with the same name, which is a SyntaxError.
    These need to be reconciled before this script can run.
    """
    a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        a , a , a = parser.parse_args_into_dataclasses()
    # Refuse to overwrite a non-empty output directory unless explicitly asked.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome." )
    # Resolve the TokenClassificationTask subclass named by --task_type from tasks.py.
    a = import_module("tasks" )
    try:
        a = getattr(UpperCAmelCase__ , model_args.task_type )
        a = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , UpperCAmelCase__ )
    # Set seed
    set_seed(training_args.seed )
    # Prepare CONLL-2003 task
    a = token_classification_task.get_labels(data_args.labels )
    a = dict(enumerate(UpperCAmelCase__ ) )
    a = len(UpperCAmelCase__ )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    a = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid={label: i for i, label in enumerate(UpperCAmelCase__ )} , cache_dir=model_args.cache_dir , )
    a = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    a = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
    # Get datasets
    a = (
        TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    a = (
        TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(UpperCAmelCase__ :np.ndarray , UpperCAmelCase__ :np.ndarray ) -> Tuple[List[int], List[int]]:
        """Drop ignored positions (CrossEntropyLoss.ignore_index) and map the
        remaining prediction/label ids back to label strings, per sequence."""
        a = np.argmax(UpperCAmelCase__ , axis=2 )
        a , a = preds.shape
        a = [[] for _ in range(UpperCAmelCase__ )]
        a = [[] for _ in range(UpperCAmelCase__ )]
        for i in range(UpperCAmelCase__ ):
            for j in range(UpperCAmelCase__ ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(UpperCAmelCase__ :EvalPrediction ) -> Dict:
        """seqeval accuracy/precision/recall/F1 over the aligned predictions."""
        a , a = align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "precision": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "recall": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
            "f1": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
        }
    # Data collator
    a = DataCollatorWithPadding(UpperCAmelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    a = Trainer(
        model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    a = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        a = trainer.evaluate()
        a = os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
                    writer.write("%s = %s\n" % (key, value) )
            results.update(UpperCAmelCase__ )
    # Predict
    if training_args.do_predict:
        a = TokenClassificationDataset(
            token_classification_task=UpperCAmelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase__ , labels=UpperCAmelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
        a , a , a = trainer.predict(UpperCAmelCase__ )
        a , a = align_predictions(UpperCAmelCase__ , UpperCAmelCase__ )
        a = os.path.join(training_args.output_dir , "test_results.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                for key, value in metrics.items():
                    logger.info(" %s = %s" , UpperCAmelCase__ , UpperCAmelCase__ )
                    writer.write("%s = %s\n" % (key, value) )
        # Save predictions
        a = os.path.join(training_args.output_dir , "test_predictions.txt" )
        if trainer.is_world_process_zero():
            with open(UpperCAmelCase__ , "w" ) as writer:
                with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
                    token_classification_task.write_predictions_to_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
    return results
def UpperCAmelCase__ ( UpperCAmelCase__ :Tuple ):
    """TPU multiprocessing entry point: spawned per process with an index
    argument, which is intentionally unused here.

    NOTE(review): this def shadows the training function of the same name
    defined above, and ``main`` is not defined anywhere in this module, so
    both this call and the ``__main__`` guard below would raise NameError —
    the entry-point names look garbled and need to be reconciled.
    """
    main()
if __name__ == "__main__":
    main()
| 32 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( column_title: str ) -> int:
    """Convert a spreadsheet column title (e.g. ``"AB"``) to its 1-based
    column number (``"A"`` -> 1, ``"Z"`` -> 26, ``"AA"`` -> 27, ...).

    Args:
        column_title: non-empty string of uppercase ASCII letters.

    Returns:
        The 1-based column number.

    Raises:
        AssertionError: if ``column_title`` is not all-uppercase (including
            the empty string). NOTE: ``assert`` disappears under ``python -O``;
            kept to preserve the original contract.
    """
    # The original body read undefined names (``column_title``, ``__A``,
    # ``index``) because all locals were collapsed to one mangled binding;
    # reconstructed from the surviving statements.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        # Each letter contributes (its 1-based alphabet position) * 26**power,
        # scanning from the least-significant (rightmost) character.
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
    from doctest import testmod

    testmod()
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase (__A):
    """Return ``True`` when every element of *__A* is distinct.

    Collapses the input into a set; the input contains no duplicates exactly
    when the set has as many members as the original sequence. Elements must
    be hashable.
    """
    distinct_items = set(__A)
    return len(distinct_items) == len(__A)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 11 | 0 |
"""simple docstring"""
def a ( files: list ) -> int:
    """Optimal merge pattern: repeatedly merge the two smallest files and
    return the minimum total merge cost.

    Each merge of two files costs the sum of their sizes; the merged file is
    put back into the pool. The input list is consumed (mutated) in place.

    Args:
        files: sizes of the files to merge (numbers).

    Returns:
        The minimal total cost of merging all files into one (0 for zero or
        one file).
    """
    # The original body referenced undefined mangled names and appended the
    # merged size inside the inner pop loop; reconstructed so the combined
    # size is re-inserted once per merge step.
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Remove the two currently-smallest files; their combined size is the
        # cost of this merge step.
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 715 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# File names expected inside a saved tokenizer directory.
# NOTE(review): these four dicts were all bound to the same mangled name
# (each assignment clobbering the previous one) while the tokenizer class
# below references VOCAB_FILES_NAMES etc.; renamed to match those references.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Download locations of the vocab/tokenizer files for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
    },
    'tokenizer_file': {
        'unc-nlp/lxmert-base-uncased': (
            'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum input length (in tokens) supported by each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'unc-nlp/lxmert-base-uncased': 5_12,
}

# Per-checkpoint keyword overrides applied at `from_pretrained` time.
PRETRAINED_INIT_CONFIGURATION = {
    'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class __A ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) LXMERT tokenizer — a BERT-style WordPiece tokenizer.

    NOTE(review): in the original, all ``__init__`` parameters shared one
    mangled name (a SyntaxError), three methods shared the name
    ``lowerCamelCase__`` and five class attributes shared ``UpperCAmelCase__``
    (later bindings clobbering earlier ones), and the base class was an
    undefined placeholder; names below are reconstructed from the bodies and
    the ``PreTrainedTokenizerFast`` import at the top of this file.
    """

    # Attribute names follow the PreTrainedTokenizerFast protocol.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the Rust normalizer when the options stored with the
        # checkpoint disagree with the options requested here (e.g. a
        # checkpoint saved with different casing behaviour).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """Wrap one or two sequences with [CLS]/[SEP] special tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """Build segment ids: 0 for the first sequence (incl. specials), 1 for
        the second sequence and its trailing [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """Delegate vocabulary saving to the backend tokenizer model; returns
        the tuple of written file paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 213 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the small SentencePiece model checked into the test fixtures.
# NOTE(review): the three module constants in this section all rebind
# ``_lowerCamelCase``, so only the final value (10) survives; the names look
# garbled and the test class below cannot reach the fixture path or the two
# integer ids through them — confirm the intended distinct names.
_lowerCamelCase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp

# Two integer token ids used by the (truncated) tokenizer test class below;
# presumably language-code ids — TODO confirm.
_lowerCamelCase : Optional[Any] = 5
_lowerCamelCase : str = 10
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = SpeechaTextTokenizer
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
def A ( self : List[str] ):
"""simple docstring"""
super().setUp()
UpperCamelCase = sp.SentencePieceProcessor()
spm_model.Load(UpperCamelCase__ )
UpperCamelCase = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(UpperCamelCase__ ) )]
UpperCamelCase = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
UpperCamelCase = Path(self.tmpdirname )
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES['spm_file'] )
UpperCamelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = '<pad>'
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(UpperCamelCase__ ) , 1_0_0_1 )
def A ( self : str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1 )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
UpperCamelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
UpperCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] )
UpperCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = {'input_ids': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Multilingual Speech2Text tokenizer checks against the pinned MuST-C checkpoint.

    NOTE(review): restored from mangled source -- the three class attributes
    were all assigned to one name (shadowing each other), the classmethod never
    stored the tokenizer it loaded, and several assertions referenced an
    undefined name where the methods read ``checkpoint_name``/``french_text``/
    ``self.tokenizer``/``ES_CODE``/``FR_CODE``.  Method names restored to
    unittest conventions so they are discovered and ``setUpClass`` runs.
    """

    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        # Loaded once for the whole class; every test below reads self.tokenizer.
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        # The target-language code is prepended and EOS appended.
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 430 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for GPT-J models; defaults match EleutherAI/gpt-j-6B.

    NOTE(review): restored from mangled source -- every constructor argument
    was bound to one throw-away local so nothing was ever stored on ``self``,
    and the two class attributes shared a name.  ``model_type``/``attribute_map``
    and the ``self.<name>`` assignments are what ``PretrainedConfig`` and the
    ONNX config below rely on.  Base class restored to the imported
    ``PretrainedConfig`` (the previous base name was undefined here).
    """

    model_type = "gptj"
    # Map the canonical HF attribute names onto GPT-J's historical names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J.

    NOTE(review): restored from mangled source -- the class shadowed the config
    class's name, every method/property was named ``A`` (shadowing each other),
    and locals (``common_inputs``, ``ordered_inputs``, ``batch``, ``seqlen``)
    were referenced but never bound.  Property/method names are restored to the
    ``OnnxConfigWithPast`` contract, which looks them up by name.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs, in forward() order."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for tracing, including past_key_values when enabled."""
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs the way they appear in forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask to cover the (zero-filled) past positions.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 430 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds tiny Lilt configs/inputs and runs shape checks for the test class.

    NOTE(review): restored from mangled source -- constructor arguments were
    bound to one throw-away local instead of ``self``, every method shared one
    name, and call arguments were the undefined ``_A``.  The class and method
    names are recovered from the call sites in the test class below
    (``LiltModelTester(self)``, ``prepare_config_and_inputs``,
    ``create_and_check_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1); swap if necessary.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the optional-argument combinations.
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for Lilt.

    NOTE(review): restored from mangled source -- the four class attributes
    shared one name, every method shared one name (so unittest discovered
    nothing), and arguments were the undefined ``_A``.  Attribute names follow
    the ModelTesterMixin contract; base classes are taken from the module's
    imports (GenerationTesterMixin inclusion is a best guess -- confirm upstream).
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped wholesale for this model.
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """Slow numerical check against the released SCUT-DLVCLab/lilt-roberta-en-base.

    NOTE(review): restored from mangled source (undefined ``_A`` placeholders,
    locals never bound).  Also fixed ``assertTrue(a, b)`` -- with two arguments
    the second is treated as the failure *message*, so the shape was never
    actually compared; ``assertEqual`` is the intended call.
    """

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 716 |
# Lazy-import module init for ConvBERT.
#
# NOTE(review): restored from mangled source -- the import structure and each
# optional-backend list were repeatedly bound to one throw-away name (losing
# the availability information), and the final line referenced
# ``_import_structure`` and ``sys`` which were never defined/imported.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps submodule name -> public names it exports; filled in per available backend.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 589 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    """Port one timm LeViT checkpoint into the HF Levit architecture and save it.

    NOTE(review): restored from mangled source -- all five parameters shared the
    name ``_lowercase`` (a SyntaxError) while the body read ``hidden_sizes``,
    ``name``, ``save_directory`` and ``push_to_hub``, and every local was bound
    to one throw-away name.  Local names are recovered from their uses
    (``from_model.eval()``, ``our_model.load_state_dict``, ...).
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        # Pick the matching timm reference model for the requested width.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()

        # The two state dicts line up one-to-one in order; only the names differ.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: both models must agree on a random input.
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."

        checkpoint_name = name
        print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named LeViT checkpoint, or all of them when no name is given.

    NOTE(review): restored from mangled source -- parameters shared one name and
    locals were bound to a throw-away name while later lines read
    ``ImageNetPreTrainedConfig``, ``names_to_hidden_sizes`` and
    ``names_to_config``.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Pre-bind the ImageNet label mapping so each width only supplies its sizes.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, idalabel=id2label, labelaid=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    # NOTE(review): ``config`` is only bound on the all-models path -- the
    # single-model branch leaves it undefined; preserved as-is, confirm intent.
    return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 523 | '''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc of ``angle`` degrees on a circle
    of the given ``radius`` (fraction ``angle/360`` of the circumference).

    NOTE(review): restored from mangled source -- both parameters were named
    ``_lowercase`` (a SyntaxError) while the body read ``radius``/``angle``,
    and the ``__main__`` caller invokes ``arc_length``.
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
import requests
from bsa import BeautifulSoup
def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus"):
    """Scrape worldometers and return {statistic title: value} as strings.

    NOTE(review): restored from mangled source -- locals were bound to one
    throw-away name while the body read ``soup``/``keys``/``values``, and the
    ``__main__`` block below calls the function as ``world_covidaa_stats``.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Headline counters plus the per-panel statistics further down the page.
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F'''{key}\n{value}\n''')
| 390 | from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE__(ModelMixin, ConfigMixin):
    """Holds CLIP embedding mean/std parameters and (un)normalizes embeddings.

    NOTE(review): restored from mangled source -- ``self.mean``/``self.std``
    were never assigned (each line bound a throw-away local) although the other
    methods read them, the base list repeated one undefined name (the imports
    provide ``ModelMixin``/``ConfigMixin``), and the three identically named
    methods shadowed each other (restored to ``to``/``scale``/``unscale``,
    the names their bodies imply).
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        # Re-wrap as Parameters so device/dtype moves stick to the module.
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Normalize embeddings to zero mean / unit std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert :meth:`scale`."""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 390 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Union[str, Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a : Dict = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
a : List[Any] = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
a : Dict = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
a : List[Any] = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
    """Fast (Rust-backed) Funnel Transformer tokenizer.

    Funnel assigns a dedicated token-type id (2) to the leading [CLS] token,
    which is why the type-id helper below differs from the usual BERT scheme.
    NOTE(review): the three helper methods below all share the mangled name
    `UpperCAmelCase_`, so only the last definition survives on the class —
    an artifact of machine renaming; verify against the original source.
    """
    __lowerCamelCase = VOCAB_FILES_NAMES
    __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
    # Slow-tokenizer counterpart used for conversion.
    __lowerCamelCase = FunnelTokenizer
    __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Token-type id assigned to the [CLS] token (regular tokens get 0/1).
    __lowerCamelCase = 2
    def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="<unk>" , snake_case__="<sep>" , snake_case__="<pad>" , snake_case__="<cls>" , snake_case__="<mask>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__=True , snake_case__=True , snake_case__=None , snake_case__="##" , **snake_case__ , ):
        """Initialise the fast tokenizer and re-sync the backend normalizer
        with the requested lowercase / strip-accents / Chinese-char options."""
        super().__init__(
            snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , clean_text=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , wordpieces_prefix=snake_case__ , **snake_case__ , )
        # Rebuild the serialized normalizer if any of its recorded options differ
        # from the arguments passed here.
        # NOTE(review): the mangled renames left `normalizer_state`,
        # `do_lower_case`, `strip_accents`, `tokenize_chinese_chars` and
        # `normalizer_class` dangling (everything is assigned to `lowercase__`);
        # restore from the original FunnelTokenizerFast before running.
        lowercase__ : Union[str, Any]= json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case
            or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars
        ):
            lowercase__ : Any= getattr(snake_case__ , normalizer_state.pop("type" ) )
            lowercase__ : Optional[Any]= do_lower_case
            lowercase__ : Optional[Any]= strip_accents
            lowercase__ : Optional[int]= tokenize_chinese_chars
            lowercase__ : Optional[int]= normalizer_class(**snake_case__ )
        lowercase__ : Optional[int]= do_lower_case
    def UpperCAmelCase_ ( self , snake_case__ , snake_case__=None ):
        """Build model inputs: [CLS] A [SEP] ( + B [SEP] when a pair is given)."""
        lowercase__ : str= [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
        """Token-type ids: `cls_token_type_id` for [CLS], 0 for sequence A
        (incl. its [SEP]) and 1 for the optional sequence B (incl. its [SEP])."""
        lowercase__ : int= [self.sep_token_id]
        lowercase__ : List[str]= [self.cls_token_id]
        if token_ids_a is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
        """Save the backend vocabulary into the given directory; returns the
        tuple of file paths written by the Rust tokenizer."""
        lowercase__ : List[str]= self._tokenizer.model.save(snake_case__ , name=snake_case__ )
        return tuple(snake_case__ )
| 218 |
"""simple docstring"""
from __future__ import annotations
class __UpperCAmelCase:
    """A binary-tree node: a payload plus optional left/right children."""
    def __init__( self , snake_case__ ):
        """Create a leaf node whose payload is ``snake_case__``."""
        # BUG FIX: the mangled source assigned to throwaway locals and read an
        # undefined name `data`; the traversal helpers in this file read
        # `.data`, `.left` and `.right`, so those attributes are restored here.
        self.data = snake_case__
        self.left: "__UpperCAmelCase | None" = None
        self.right: "__UpperCAmelCase | None" = None
def lowercase__(A ) ->None:  # In Order traversal of the tree
    """Print the payloads of the tree rooted at ``A`` in in-order
    (left subtree, node, right subtree); no output for an empty tree."""
    # BUG FIX: the mangled body referenced undefined names (`tree` for the
    # parameter and `display` for the recursion); both are restored to the
    # names actually in scope.
    if A:
        lowercase__(A.left )
        print(A.data )
        lowercase__(A.right )
def lowercase__(A ) ->int:
    """Return the height of the tree rooted at ``A`` (0 for an empty tree)."""
    # BUG FIX: restored the parameter (`A`, not the undefined `tree`) and the
    # self-recursion (the mangled body called a nonexistent `depth_of_tree`).
    return 1 + max(lowercase__(A.left ) , lowercase__(A.right ) ) if A else 0
def lowercase__(A ) ->bool:
    """Return True iff every node of the tree rooted at ``A`` has either
    zero or two children (i.e. the tree is a full binary tree)."""
    # BUG FIX: restored the parameter name and the self-recursion (the mangled
    # body used the undefined `tree` / `is_full_binary_tree`).
    if not A:
        return True
    if A.left and A.right:
        return lowercase__(A.left ) and lowercase__(A.right )
    else:
        return not A.left and not A.right
def lowercase__() ->None:  # Main function for testing.
    """Demo driver: build a sample tree, then print whether it is full,
    its depth, and an in-order listing.

    NOTE(review): this block is machine-mangled — each `Node(...)` result is
    bound to a fresh throwaway local instead of being linked into a tree, and
    `Node`, `is_full_binary_tree`, `depth_of_tree`, `display` and `main` are
    not defined under those names in this file. Restore from the original
    source before executing.
    """
    lowercase__ : int= Node(1 )
    lowercase__ : Union[str, Any]= Node(2 )
    lowercase__ : Optional[int]= Node(3 )
    lowercase__ : Optional[Any]= Node(4 )
    lowercase__ : Optional[Any]= Node(5 )
    lowercase__ : Tuple= Node(6 )
    lowercase__ : Any= Node(7 )
    lowercase__ : Tuple= Node(8 )
    lowercase__ : List[Any]= Node(9 )
    print(is_full_binary_tree(A ) )
    print(depth_of_tree(A ) )
    print("Tree is: " )
    display(A )
if __name__ == "__main__":
    main()
| 218 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# Vocabulary file name, its download URL, and the model's maximum input
# length in tokens for the RemBERT SentencePiece tokenizer.
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.model'}
lowerCAmelCase__ = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}
lowerCAmelCase__ = {
    'google/rembert': 256,
}
class __lowercase (__lowerCamelCase ):
    """RemBERT tokenizer backed by a SentencePiece model.

    NOTE(review): machine renaming collapsed most method names to
    `__UpperCamelCase`, so later definitions shadow earlier ones on the
    class; verify against the original RemBertTokenizer.
    """
    _lowerCamelCase = VOCAB_FILES_NAMES
    _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[Any]="[CLS]" , UpperCAmelCase_ : Tuple="[SEP]" , UpperCAmelCase_ : Tuple="[UNK]" , UpperCAmelCase_ : Optional[int]="[SEP]" , UpperCAmelCase_ : Any="[PAD]" , UpperCAmelCase_ : Tuple="[CLS]" , UpperCAmelCase_ : List[str]="[MASK]" , **UpperCAmelCase_ : Union[str, Any] , ):
        """Load the SentencePiece model and record the casing/accent options."""
        super().__init__(
            do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
        UpperCamelCase__ : Tuple = do_lower_case
        UpperCamelCase__ : List[Any] = remove_space
        UpperCamelCase__ : Any = keep_accents
        UpperCamelCase__ : Tuple = vocab_file
        UpperCamelCase__ : Tuple = spm.SentencePieceProcessor()
        self.sp_model.Load(UpperCAmelCase_)
    @property
    def __UpperCamelCase ( self : List[str]):
        """Vocabulary size of the underlying SentencePiece model."""
        return len(self.sp_model)
    def __UpperCamelCase ( self : Union[str, Any]):
        """Return the full token->id vocabulary, including added tokens."""
        UpperCamelCase__ : Optional[int] = {self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__( self : Dict):
        """Drop the (unpicklable) SentencePiece processor before pickling."""
        UpperCamelCase__ : Union[str, Any] = self.__dict__.copy()
        UpperCamelCase__ : Union[str, Any] = None
        return state
    def __setstate__( self : List[str] , UpperCAmelCase_ : Tuple):
        """Restore pickled state and reload the SentencePiece model from disk."""
        UpperCamelCase__ : List[Any] = d
        UpperCamelCase__ : Tuple = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def __UpperCamelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False):
        """Tokenize text into SentencePiece pieces."""
        UpperCamelCase__ : Dict = self.sp_model.EncodeAsPieces(UpperCAmelCase_)
        return pieces
    def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : Tuple):
        """Map a piece (token string) to its id."""
        return self.sp_model.PieceToId(UpperCAmelCase_)
    def __UpperCamelCase ( self : int , UpperCAmelCase_ : List[str]):
        """Map an id back to its piece (token string)."""
        return self.sp_model.IdToPiece(UpperCAmelCase_)
    def __UpperCamelCase ( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any]):
        """Join pieces back into a plain string."""
        UpperCamelCase__ : Tuple = self.sp_model.decode_pieces(UpperCAmelCase_)
        return out_string
    def __UpperCamelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
        """Build model inputs: [CLS] A [SEP] ( + B [SEP] for a pair)."""
        UpperCamelCase__ : Dict = [self.sep_token_id]
        UpperCamelCase__ : str = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def __UpperCamelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
        """Return a 0/1 mask flagging special tokens in the sequence(s)."""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1]
        return [1] + ([0] * len(UpperCAmelCase_)) + [1]
    def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
        """Token-type ids: 0 for [CLS] A [SEP], 1 for the optional B [SEP]."""
        UpperCamelCase__ : List[str] = [self.sep_token_id]
        UpperCamelCase__ : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
    def __UpperCamelCase ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
        """Copy the SentencePiece model file into the target directory.

        NOTE(review): on a non-directory path this logs an error and returns
        None instead of a tuple — kept as in the source, but callers
        expecting `(path,)` should beware.
        """
        if not os.path.isdir(UpperCAmelCase_):
            logger.error('Vocabulary path ({}) should be a directory'.format(UpperCAmelCase_))
            return
        UpperCamelCase__ : Dict = os.path.join(
            UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_):
            copyfile(self.vocab_file , UpperCAmelCase_)
        return (out_vocab_file,)
| 6 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
    """Slow integration test for the DiT document-image classifier."""
    @slow
    def __UpperCamelCase ( self : int):
        """Run microsoft/dit-base-finetuned-rvlcdip on one RVL-CDIP sample and
        check the logits' shape and first three values.

        NOTE(review): mangled locals — several names read below (`model`,
        `dataset`, `image_processor`, `outputs`, `logits`) are assigned to
        `UpperCamelCase__` above them; restore before running.
        """
        UpperCamelCase__ : Union[str, Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        UpperCamelCase__ : List[str] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(UpperCAmelCase_)
        from datasets import load_dataset
        UpperCamelCase__ : Optional[Any] = load_dataset('nielsr/rvlcdip-demo')
        UpperCamelCase__ : int = dataset['train'][0]['image'].convert('RGB')
        UpperCamelCase__ : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
        # forward pass
        with torch.no_grad():
            UpperCamelCase__ : Optional[Any] = model(**UpperCAmelCase_)
        UpperCamelCase__ : Tuple = outputs.logits
        # RVL-CDIP has 16 document classes, hence a (1, 16) logits tensor.
        UpperCamelCase__ : str = torch.Size((1, 16))
        self.assertEqual(logits.shape , UpperCAmelCase_)
        UpperCamelCase__ : Tuple = torch.tensor(
            [-0.41_58, -0.40_92, -0.43_47] , device=UpperCAmelCase_ , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a_ ( unittest.TestCase ):
    """Unit tests for the `get_activation` factory.

    Each test asserts the factory returns the expected `torch.nn` module and
    spot-checks the activation's value at a few scalar inputs.
    NOTE(review): all four tests share the mangled name `UpperCamelCase_`, so
    only the last one is registered by unittest; names are kept to preserve
    the class interface, but the originals had distinct `test_*` names.
    """

    def UpperCamelCase_ ( self ):
        """'swish' maps to nn.SiLU: ~0 at large negative inputs, ~identity at +20."""
        # BUG FIX: the mangled body asserted on undefined names
        # (`__UpperCamelCase`, `act`) and used the nonexistent dtype
        # `torch.floataa`; restored a single local and `torch.float32`.
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def UpperCamelCase_ ( self ):
        """'silu' resolves to the same nn.SiLU module as 'swish'."""
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def UpperCamelCase_ ( self ):
        """'mish' maps to nn.Mish: underflows to 0 by -200, nonzero at -1."""
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def UpperCamelCase_ ( self ):
        """'gelu' maps to nn.GELU: ~0 at large negative inputs, ~identity at +20."""
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
# Pin table mapping distribution name -> pip requirement specifier; consumed
# by diffusers' dependency-version bookkeeping (deps table / setup extras).
lowercase__ : Optional[int] = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 515 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (SentencePiece-based) tokenizer is optional; fall back to None when
# the `sentencepiece` package is absent.
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    UpperCAmelCase_ = None
UpperCAmelCase_ = logging.get_logger(__name__)
# Vocabulary/tokenizer file names, their download URLs, and the model's
# maximum input length in tokens.
UpperCAmelCase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}
UpperCAmelCase_ = {
    "camembert-base": 5_12,
}
# SentencePiece's word-initial marker character.
UpperCAmelCase_ = "▁"
class __lowercase ( __magic_name__ ):
    """Fast CamemBERT tokenizer (RoBERTa-style, backed by `tokenizers`).

    NOTE(review): the mangled renaming gave the three helper methods the same
    name (`UpperCamelCase__`), so only the last definition survives on the
    class, and `__init__` reads names (`mask_token`, `vocab_file`) that are
    only assigned to `__a`; verify against the original CamembertTokenizerFast.
    """
    _a = VOCAB_FILES_NAMES
    _a = PRETRAINED_VOCAB_FILES_MAP
    _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a = ["""input_ids""", """attention_mask"""]
    # Slow-tokenizer counterpart (None when sentencepiece is unavailable).
    _a = CamembertTokenizer
    def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="</s>" , UpperCamelCase="<s>" , UpperCamelCase="<unk>" , UpperCamelCase="<pad>" , UpperCamelCase="<mask>" , UpperCamelCase=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase , ) -> Optional[int]:
        # Mask token behave like a normal word, i.e. include the space before it
        __a = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
        super().__init__(
            __UpperCamelCase , tokenizer_file=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , )
        __a = vocab_file
        # Saving the slow vocab is only possible when a vocab file was provided.
        __a = False if not self.vocab_file else True
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
        """Build model inputs: <s> A </s> ( </s> B </s> for a pair)."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __a = [self.cls_token_id]
        __a = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
        """Token-type ids: CamemBERT uses all zeros, even for sequence pairs."""
        __a = [self.sep_token_id]
        __a = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
        """Copy the slow SentencePiece vocab into the save directory."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(__UpperCamelCase ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        __a = os.path.join(
            __UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
            copyfile(self.vocab_file , __UpperCamelCase )
        return (out_vocab_file,)
| 707 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
# SentencePiece's word-initial marker character.
UpperCAmelCase_ = "▁"
# Shared NLLB SentencePiece model: file name, download URL, and the model's
# maximum input length in tokens.
UpperCAmelCase_ = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase_ = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}
UpperCAmelCase_ = {
    "facebook/nllb-200-distilled-600M": 10_24,
}
# fmt: off
UpperCAmelCase_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", 
"sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __lowercase ( __magic_name__ ):
    """NLLB-200 tokenizer (SentencePiece with a fairseq-aligned vocabulary).

    Wraps every encoded sequence with language-code special tokens: `src_lang`
    controls the tokens around inputs, `tgt_lang` the ones used for targets.
    NOTE(review): the mangled renaming gave many methods the same name
    (`UpperCamelCase__`), so later definitions shadow earlier ones, and most
    local/attribute assignments target `__a`; verify against the original
    NllbTokenizer before relying on any claim below.
    """
    _a = VOCAB_FILES_NAMES
    _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _a = PRETRAINED_VOCAB_FILES_MAP
    _a = ["""input_ids""", """attention_mask"""]
    # Language-code tokens prepended/appended around every sequence.
    _a = []
    _a = []
    def __init__( self , UpperCamelCase , UpperCamelCase="<s>" , UpperCamelCase="</s>" , UpperCamelCase="</s>" , UpperCamelCase="<s>" , UpperCamelCase="<unk>" , UpperCamelCase="<pad>" , UpperCamelCase="<mask>" , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase = None , UpperCamelCase=None , UpperCamelCase=False , **UpperCamelCase , ) -> Tuple:
        # Mask token behave like a normal word, i.e. include the space before it
        __a = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
        __a = {} if sp_model_kwargs is None else sp_model_kwargs
        __a = legacy_behaviour
        super().__init__(
            bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , tokenizer_file=UpperCamelCase , src_lang=UpperCamelCase , tgt_lang=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase , **UpperCamelCase , )
        __a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(UpperCamelCase ) )
        __a = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 token
        __a = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        __a = 1
        __a = len(self.sp_model )
        # Language codes get ids after the spm vocab (plus the fairseq offset).
        __a = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase )
        }
        __a = {v: k for k, v in self.lang_code_to_id.items()}
        __a = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        __a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        __a = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        __a = src_lang if src_lang is not None else 'eng_Latn'
        __a = self.lang_code_to_id[self._src_lang]
        __a = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ) -> Union[str, Any]:
        """Serialize without the live SentencePiece processor (keep its proto)."""
        __a = self.__dict__.copy()
        __a = None
        __a = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , UpperCamelCase ) -> str:
        """Rebuild the SentencePiece processor from the serialized proto."""
        __a = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            __a = {}
        __a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def UpperCamelCase__ ( self ) -> str:
        """Total vocab size: spm pieces + language codes + offset (+1 mask)."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def UpperCamelCase__ ( self ) -> str:
        """Current source-language code (e.g. 'eng_Latn')."""
        return self._src_lang
    @src_lang.setter
    def UpperCamelCase__ ( self , UpperCamelCase ) -> None:
        """Switch source language and refresh the special-token wrapping."""
        __a = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False ) -> List[int]:
        """Return a 0/1 mask flagging the language/eos tokens around sequences."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
        __a = [1] * len(self.prefix_tokens )
        __a = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(UpperCamelCase )) + suffix_ones
        return prefix_ones + ([0] * len(UpperCamelCase )) + ([0] * len(UpperCamelCase )) + suffix_ones
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
        """Wrap the sequence(s) with the current language prefix/suffix tokens."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> List[int]:
        """Token-type ids: all zeros (NLLB has no segment embeddings)."""
        __a = [self.sep_token_id]
        __a = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ) -> Dict:
        """Tokenize for translation and attach the target-language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        __a = src_lang
        __a = self(UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
        __a = self.convert_tokens_to_ids(UpperCamelCase )
        __a = tgt_lang_id
        return inputs
    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        """Return the full token->id vocabulary including added tokens."""
        __a = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def UpperCamelCase__ ( self , UpperCamelCase ) -> List[str]:
        """Tokenize text into SentencePiece pieces."""
        return self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
    def UpperCamelCase__ ( self , UpperCamelCase ) -> Optional[Any]:
        """Piece -> id, honouring the fairseq alignment table and offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __a = self.sp_model.PieceToId(UpperCamelCase )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def UpperCamelCase__ ( self , UpperCamelCase ) -> Any:
        """Id -> piece, honouring the fairseq alignment table and offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def UpperCamelCase__ ( self , UpperCamelCase ) -> str:
        """Join pieces into a string, replacing the ▁ marker with spaces."""
        __a = ''.join(UpperCamelCase ).replace(UpperCamelCase , ' ' ).strip()
        return out_string
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None ) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into the directory."""
        if not os.path.isdir(UpperCamelCase ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        __a = os.path.join(
            UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase , 'wb' ) as fi:
                __a = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase )
        return (out_vocab_file,)
    def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = "eng_Latn" , UpperCamelCase = None , UpperCamelCase = "fra_Latn" , **UpperCamelCase , ) -> BatchEncoding:
        """Prepare a seq2seq batch, recording the src/tgt language codes."""
        __a = src_lang
        __a = tgt_lang
        return super().prepare_seqaseq_batch(UpperCamelCase , UpperCamelCase , **UpperCamelCase )
    def UpperCamelCase__ ( self ) -> List[str]:
        """Switch special tokens to input (source-language) mode."""
        return self.set_src_lang_special_tokens(self.src_lang )
    def UpperCamelCase__ ( self ) -> int:
        """Switch special tokens to target (translation) mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def UpperCamelCase__ ( self , UpperCamelCase ) -> None:
        """Set prefix/suffix tokens for the source language.
        Legacy: no prefix, suffix=[eos, lang_code]; new: prefix=[lang_code], suffix=[eos]."""
        __a = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            __a = []
            __a = [self.eos_token_id, self.cur_lang_code]
        else:
            __a = [self.cur_lang_code]
            __a = [self.eos_token_id]
    def UpperCamelCase__ ( self , UpperCamelCase ) -> None:
        """Set prefix/suffix tokens for the target language (same scheme)."""
        __a = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            __a = []
            __a = [self.eos_token_id, self.cur_lang_code]
        else:
            __a = [self.cur_lang_code]
            __a = [self.eos_token_id]
| 490 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowercase = logging.get_logger(__name__)
# SentencePiece's word-initial marker character.
__lowercase = '''▁'''
# Vocabulary / spm / tokenizer-config file names, their download URLs, and
# the model's maximum input length in tokens.
__lowercase = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
__lowercase = {
    '''vocab_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
    },
    '''spm_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
    },
    '''tokenizer_config_file''': {
        '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
        '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
    },
}
__lowercase = {
    '''facebook/m2m100_418M''': 1_0_2_4,
}
# fmt: off
__lowercase = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : int = ['''input_ids''', '''attention_mask''']
UpperCAmelCase_ : List[int] = []
UpperCAmelCase_ : List[int] = []
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="m2m100" , __lowerCAmelCase = None , __lowerCAmelCase=8 , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase = language_codes
lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES[language_codes]
lowerCAmelCase = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
lowerCAmelCase = kwargs.get("""additional_special_tokens""" , [])
kwargs["additional_special_tokens"] += [
self.get_lang_token(__lowerCAmelCase)
for lang_code in fairseq_language_code
if self.get_lang_token(__lowerCAmelCase) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = vocab_file
lowerCAmelCase = load_json(__lowerCAmelCase)
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
lowerCAmelCase = spm_file
lowerCAmelCase = load_spm(__lowerCAmelCase , self.sp_model_kwargs)
lowerCAmelCase = len(self.encoder)
lowerCAmelCase = {
self.get_lang_token(__lowerCAmelCase): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase)
}
lowerCAmelCase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase)}
lowerCAmelCase = {v: k for k, v in self.lang_token_to_id.items()}
lowerCAmelCase = src_lang if src_lang is not None else """en"""
lowerCAmelCase = tgt_lang
lowerCAmelCase = self.get_lang_id(self._src_lang)
self.set_src_lang_special_tokens(self._src_lang)
lowerCAmelCase = num_madeup_words
@property
def vocab_size(self) -> int:
    """Total vocabulary size: json/sentencepiece vocab plus the language tokens."""
    return len(self.encoder) + len(self.lang_token_to_id)
@property
def src_lang(self) -> str:
    """Current source-language code (e.g. ``"en"``)."""
    return self._src_lang
@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
    """Change the source language and refresh the prefix/suffix special tokens."""
    self._src_lang = new_src_lang
    self.set_src_lang_special_tokens(self._src_lang)
def _tokenize(self, text: str):
    """Tokenize *text* into sentencepiece subword strings."""
    # out_type=str asks SentencePiece for piece strings instead of ids.
    return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token: str) -> int:
    """Map a token to its id: language tokens first, then the vocab, then <unk>."""
    if token in self.lang_token_to_id:
        return self.lang_token_to_id[token]
    return self.encoder.get(token, self.encoder[self.unk_token])
def _convert_id_to_token(self, index: int) -> str:
    """Map an id back to its token: language tokens first, then the vocab, then <unk>."""
    if index in self.id_to_lang_token:
        return self.id_to_lang_token[index]
    return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens) -> str:
    """Join subword tokens back into text, decoding runs of ordinary pieces
    with sentencepiece and copying special tokens through verbatim."""
    current_sub_tokens = []
    out_string = ""
    for token in tokens:
        # make sure that special tokens are not decoded using sentencepiece model
        if token in self.all_special_tokens:
            out_string += self.sp_model.decode(current_sub_tokens) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
    # flush the trailing run of ordinary tokens
    out_string += self.sp_model.decode(current_sub_tokens)
    return out_string.strip()
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False):
    """Return a 1/0 mask marking special tokens in a sequence built by this tokenizer."""
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )
    prefix_ones = [1] * len(self.prefix_tokens)
    suffix_ones = [1] * len(self.suffix_tokens)
    if token_ids_1 is None:
        return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
    return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Add the language-prefix and </s>-suffix special tokens around the sequence(s)."""
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def get_vocab(self) -> dict:
    """Return the full token->id mapping, including tokens added after loading."""
    vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
    vocab.update(self.added_tokens_encoder)
    return vocab
def __getstate__(self) -> dict:
    """Drop the unpicklable SentencePieceProcessor before pickling."""
    state = self.__dict__.copy()
    state["sp_model"] = None
    return state
def __setstate__(self, d: dict) -> None:
    """Restore pickled state and rebuild the SentencePieceProcessor."""
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self, "sp_model_kwargs"):
        self.sp_model_kwargs = {}
    self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
def save_vocabulary(self, save_directory, filename_prefix=None):
    """Write the json vocab and the sentencepiece model into *save_directory*.

    Returns the two output paths as strings. Raises OSError if the target is
    not a directory.
    """
    save_dir = Path(save_directory)
    if not save_dir.is_dir():
        raise OSError(f"{save_directory} should be a directory")
    vocab_save_path = save_dir / (
        (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
    )
    spm_save_path = save_dir / (
        (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
    )
    save_json(self.encoder, vocab_save_path)
    # Copy the spm file if it exists elsewhere, otherwise serialize the in-memory model.
    if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
        copyfile(self.spm_file, spm_save_path)
    elif not os.path.isfile(self.spm_file):
        with open(spm_save_path, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (str(vocab_save_path), str(spm_save_path))
def prepare_seq2seq_batch(self, src_texts, src_lang: str = "en", tgt_texts=None, tgt_lang: str = "ro", **kwargs):
    """Tokenize a batch of source (and optional target) texts for seq2seq training."""
    self.src_lang = src_lang
    self.tgt_lang = tgt_lang
    self.set_src_lang_special_tokens(self.src_lang)
    return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _build_translation_inputs(self, raw_inputs, src_lang, tgt_lang, **extra_kwargs):
    """Used by the translation pipeline to prepare inputs for `generate`.

    Sets the source language, tokenizes *raw_inputs*, and records the target
    language id as ``forced_bos_token_id``.
    """
    if src_lang is None or tgt_lang is None:
        raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
    self.src_lang = src_lang
    inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
    tgt_lang_id = self.get_lang_id(tgt_lang)
    inputs["forced_bos_token_id"] = tgt_lang_id
    return inputs
def _switch_to_input_mode(self):
    """Configure special tokens for encoding source-language text."""
    self.set_src_lang_special_tokens(self.src_lang)
def _switch_to_target_mode(self):
    """Configure special tokens for encoding target-language text."""
    self.set_tgt_lang_special_tokens(self.tgt_lang)
def set_src_lang_special_tokens(self, src_lang: str) -> None:
    """Reset special tokens to: prefix=[src_lang_token], suffix=[eos]."""
    lang_token = self.get_lang_token(src_lang)
    self.cur_lang_id = self.lang_token_to_id[lang_token]
    self.prefix_tokens = [self.cur_lang_id]
    self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
    """Reset special tokens to: prefix=[tgt_lang_token], suffix=[eos]."""
    lang_token = self.get_lang_token(tgt_lang)
    self.cur_lang_id = self.lang_token_to_id[lang_token]
    self.prefix_tokens = [self.cur_lang_id]
    self.suffix_tokens = [self.eos_token_id]
def get_lang_token(self, lang: str) -> str:
    """Map a language code (e.g. ``"en"``) to its token (``"__en__"``)."""
    return self.lang_code_to_token[lang]
def get_lang_id(self, lang: str) -> int:
    """Map a language code to its vocabulary id."""
    lang_token = self.get_lang_token(lang)
    return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs):
    """Load and return a SentencePieceProcessor from the model file at *path*."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str):
    """Load and return the JSON document stored at *path*."""
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """Serialize *data* as pretty-printed (indent=2) JSON to *path*."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 370 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

# Vision-dependent objects are only exposed when the vision stack is installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor", "ChineseCLIPImageProcessor"]

# Torch-dependent objects are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 | 1 |
from bisect import bisect
from itertools import accumulate
def _lowerCAmelCase(vl, wt, w, n) -> float:
    """Fractional knapsack: maximum value for capacity *w* given item values
    *vl* and weights *wt* (*n* = number of items).

    Items are taken greedily by value/weight ratio; the last item may be
    taken fractionally.
    """
    # Sort items by value density, best first.
    r = sorted(zip(vl, wt), key=lambda item: item[0] / item[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # Prefix sums of weights to find how many whole items fit.
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 720 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class a :
    # Immutable state container for the Flax DDPM scheduler; new instances are
    # produced via `.replace(...)` rather than mutation.
    # NOTE(review): all four fields share the name `__lowerCAmelCase`, so only the
    # last declaration survives — presumably a mechanical rename of the original
    # (common / init_noise_sigma / timesteps / num_inference_steps). Confirm.
    __lowerCAmelCase : CommonSchedulerState
    # setable values
    __lowerCAmelCase : jnp.ndarray
    __lowerCAmelCase : jnp.ndarray
    __lowerCAmelCase : Optional[int] = None

    @classmethod
    def __lowerCamelCase ( cls :Tuple ,__lowercase :CommonSchedulerState ,__lowercase :jnp.ndarray ,__lowercase :jnp.ndarray ):
        # Factory constructor.
        # NOTE(review): duplicate parameter names (`__lowercase`) are a SyntaxError;
        # the original had distinct names (common, init_noise_sigma, timesteps).
        return cls(common=__lowercase ,init_noise_sigma=__lowercase ,timesteps=__lowercase )
@dataclass
class a ( __lowerCamelCase ):
    # Output wrapper returned by the scheduler's step(): extends the base
    # FlaxSchedulerOutput with the updated scheduler state.
    __lowerCAmelCase : DDPMSchedulerState
class a ( __lowerCamelCase , __lowerCamelCase ):
    # Flax port of the DDPM (Denoising Diffusion Probabilistic Models) scheduler.
    # NOTE(review): every method below is named `__lowerCamelCase`, so later defs
    # shadow earlier ones, and many assignments bind throwaway locals
    # (`snake_case__`) where instance/state writes look intended — apparently a
    # mechanical identifier rename gone wrong. Flagged, not fixed here.
    __lowerCAmelCase : Dict = [e.name for e in FlaxKarrasDiffusionSchedulers]  # compatible scheduler names
    __lowerCAmelCase : jnp.dtype  # dtype used for all state arrays

    @property
    def __lowerCamelCase ( self :int ):
        # Marker: this scheduler uses the explicit-state Flax API.
        return True

    @register_to_config
    def __lowerCamelCase ( self :Tuple ,__lowercase :int = 1_0_0_0 ,__lowercase :float = 0.0001 ,__lowercase :float = 0.02 ,__lowercase :str = "linear" ,__lowercase :Optional[jnp.ndarray] = None ,__lowercase :str = "fixed_small" ,__lowercase :bool = True ,__lowercase :str = "epsilon" ,__lowercase :jnp.dtype = jnp.floataa ,):
        # Constructor; @register_to_config stores the arguments on self.config.
        # NOTE(review): duplicate parameter names are a SyntaxError; the original
        # had num_train_timesteps / beta_start / beta_end / beta_schedule /
        # trained_betas / variance_type / clip_sample / prediction_type / dtype.
        snake_case__ : Optional[int] = dtype

    def __lowerCamelCase ( self :List[Any] ,__lowercase :Optional[CommonSchedulerState] = None ):
        # Build the initial DDPMSchedulerState (create_state in the original API).
        if common is None:
            snake_case__ : Tuple = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        snake_case__ : Union[str, Any] = jnp.array(1.0 ,dtype=self.dtype )
        snake_case__ : str = jnp.arange(0 ,self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=__lowercase ,init_noise_sigma=__lowercase ,timesteps=__lowercase ,)

    def __lowerCamelCase ( self :List[str] ,__lowercase :DDPMSchedulerState ,__lowercase :jnp.ndarray ,__lowercase :Optional[int] = None ):
        # scale_model_input: DDPM applies no scaling, the sample passes through.
        return sample

    def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :DDPMSchedulerState ,__lowercase :int ,__lowercase :Tuple = () ):
        # set_timesteps: pick num_inference_steps evenly spaced training timesteps.
        snake_case__ : str = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        snake_case__ : Optional[int] = (jnp.arange(0 ,__lowercase ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=__lowercase ,timesteps=__lowercase ,)

    def __lowerCamelCase ( self :List[str] ,__lowercase :DDPMSchedulerState ,__lowercase :List[Any] ,__lowercase :str=None ,__lowercase :Any=None ):
        # _get_variance: posterior variance at timestep t per the DDPM paper.
        snake_case__ : List[Any] = state.common.alphas_cumprod[t]
        snake_case__ : Tuple = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        snake_case__ : Dict = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            snake_case__ : Optional[Any] = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            snake_case__ : Tuple = jnp.clip(__lowercase ,a_min=1e-2_0 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            snake_case__ : str = jnp.log(jnp.clip(__lowercase ,a_min=1e-2_0 ) )
        elif variance_type == "fixed_large":
            snake_case__ : Optional[int] = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            snake_case__ : Optional[int] = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate between min_log and max_log using the model's prediction
            snake_case__ : List[str] = variance
            snake_case__ : Tuple = state.common.betas[t]
            snake_case__ : List[Any] = (predicted_variance + 1) / 2
            snake_case__ : str = frac * max_log + (1 - frac) * min_log
        return variance

    def __lowerCamelCase ( self :Tuple ,__lowercase :DDPMSchedulerState ,__lowercase :jnp.ndarray ,__lowercase :int ,__lowercase :jnp.ndarray ,__lowercase :Optional[jax.random.KeyArray] = None ,__lowercase :bool = True ,):
        # step: one reverse-diffusion update x_t -> x_{t-1}.
        snake_case__ : Union[str, Any] = timestep
        if key is None:
            snake_case__ : Any = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            # NOTE(review): a tuple target with an annotation is a SyntaxError;
            # original split model_output into (model_output, predicted_variance).
            snake_case__ , snake_case__ : int = jnp.split(__lowercase ,sample.shape[1] ,axis=1 )
        else:
            snake_case__ : Optional[int] = None
        # 1. compute alphas, betas
        snake_case__ : Union[str, Any] = state.common.alphas_cumprod[t]
        snake_case__ : List[str] = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
        snake_case__ : Any = 1 - alpha_prod_t
        snake_case__ : Union[str, Any] = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            snake_case__ : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            snake_case__ : Any = model_output
        elif self.config.prediction_type == "v_prediction":
            snake_case__ : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            snake_case__ : str = jnp.clip(__lowercase ,-1 ,1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        snake_case__ : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        snake_case__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        snake_case__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # Draw one noise sample and scale it by the posterior std-dev.
            snake_case__ : int = jax.random.split(__lowercase ,num=1 )
            snake_case__ : Tuple = jax.random.normal(__lowercase ,shape=model_output.shape ,dtype=self.dtype )
            return (self._get_variance(__lowercase ,__lowercase ,predicted_variance=__lowercase ) ** 0.5) * noise
        # No noise is added at the final step (t == 0).
        snake_case__ : List[Any] = jnp.where(t > 0 ,random_variance() ,jnp.zeros(model_output.shape ,dtype=self.dtype ) )
        snake_case__ : List[str] = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=__lowercase ,state=__lowercase )

    def __lowerCamelCase ( self :Optional[Any] ,__lowercase :DDPMSchedulerState ,__lowercase :jnp.ndarray ,__lowercase :jnp.ndarray ,__lowercase :jnp.ndarray ,):
        # add_noise: forward-diffuse clean samples to the given timesteps.
        return add_noise_common(state.common ,__lowercase ,__lowercase ,__lowercase )

    def __lowerCamelCase ( self :List[Any] ,__lowercase :DDPMSchedulerState ,__lowercase :jnp.ndarray ,__lowercase :jnp.ndarray ,__lowercase :jnp.ndarray ,):
        # get_velocity: v-prediction target for the given samples/noise/timesteps.
        return get_velocity_common(state.common ,__lowercase ,__lowercase ,__lowercase )

    def __len__( self :str ):
        # Number of training timesteps the scheduler was configured with.
        return self.config.num_train_timesteps
| 219 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def A_ ( inductance: float ,frequency: float ,reactance: float ) -> dict[str, float]:
    """Solve for the one unknown among inductance (H), frequency (Hz) and
    inductive reactance (Ω), using X_L = 2·π·f·L.

    Exactly one argument must be 0 — that is the quantity solved for; the
    result is returned as a single-entry dict keyed by its name.

    Raises ValueError if zero-count != 1 or any argument is negative.
    """
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if inductance < 0:
        raise ValueError("""Inductance cannot be negative""" )
    if frequency < 0:
        raise ValueError("""Frequency cannot be negative""" )
    if reactance < 0:
        raise ValueError("""Inductive reactance cannot be negative""" )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 499 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

# Files that make up a saved Funnel tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Size/variant names of all released Funnel Transformer checkpoints.
_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

# Download locations of the vocab/tokenizer files for each checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": (
            "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}

# Every checkpoint uses absolute position embeddings of length 512 and lowercasing.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class __UpperCAmelCase( PreTrainedTokenizerFast ):
    """Fast (rust-backed) WordPiece tokenizer for Funnel Transformer.

    Differs from BERT-style tokenizers in that the [CLS] token gets its own
    token-type id (``cls_token_type_id = 2``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type") )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<cls> A <sep>`` (pairs: ``<cls> A <sep> B <sep>``)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token-type ids: cls_token_type_id for <cls>, then 0s (and 1s for the pair)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocab file(s) and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 218 | 0 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from *position* that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Return True when every cell of *board* has been visited (no 0 left)."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Depth-first backtracking step: try to extend the tour from *pos* with
    move number *curr* already placed. Mutates *board* in place."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # undo the move and try the next candidate
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every start square.

    Returns the board with cells numbered 1..n*n in visiting order; raises
    ValueError when no tour exists for this size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Kight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 721 |
# Cipher alphabet shared by the Vigenère helpers below (referenced as LETTERS).
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def main() -> None:
    """Interactive entry point: read message/key/mode from stdin and print the
    encrypted or decrypted result."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with *key* using the Vigenère cipher."""
    return translate_message(key, message, "encrypt")
def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* with *key* using the Vigenère cipher."""
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    """Vigenère-translate *message* with *key*.

    *mode* is ``"encrypt"`` or ``"decrypt"``. Letter case is preserved;
    non-alphabetic characters are copied through and do not advance the key.
    """
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            # advance (and wrap) the key only for alphabetic symbols
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    # Interactive entry point: prompt for message/key/mode and print the result.
    main()
| 321 | 0 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCAmelCase ( AbstractDatasetInputStream ):
    """Dataset input stream that builds a `datasets` Dataset from a python generator."""

    def __init__(
        self,
        generator,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        gen_kwargs=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Materialize (or stream) the dataset from the generator builder."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 103 | import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
# Emit conversion progress at INFO level; `logger` is used by the helpers below.
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load the original (yaml) training config and return it as a flat
    `argparse.Namespace` whose attribute names are dot-joined key paths."""
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into {"a.b.c": value} pairs.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a `MobileViTVaConfig` for *task_name* (imagenet/ade20k/voc variants),
    filling architecture fields from the original yaml config and the id2label
    mapping from the HuggingFace label-files dataset."""
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1_000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21_000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)
    # id2label
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(dct, old, new):
    """Move the value stored under key *old* to key *new* in *dct* (in place)."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Build (old_key, new_key) pairs mapping original MobileViTV2 checkpoint keys
    to the HuggingFace naming scheme.

    Args:
        state_dict: original checkpoint state dict (only its keys are read).
        base_model: if True, omit the leading ``mobilevitv2.`` model prefix.

    Returns:
        List of ``(source_key, destination_key)`` tuples.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip a leading "encoder." if present; conditions below test the
        # original key `k`, replacements are applied to the evolving `k_new`.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            # number of transformer blocks per stage in the original checkpoint
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                # NOTE(review): the `{j+1}` branch below can also match a
                # transformer-block index for j < max(j_in); kept as-is to
                # preserve the original mapping logic — confirm against a
                # real checkpoint.
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm.")
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-segmentation-head weights from ``state_dict`` in place.

    The HuggingFace model has no equivalent of ``seg_head.aux_head``, so those
    entries must be removed before ``load_state_dict``.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO verification image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTV2 checkpoint to the HuggingFace format.

    Args:
        task_name: task the checkpoint was trained on (e.g. ``imagenet1k_256``,
            ``ade20k_deeplabv3``, ``voc_deeplabv3``).
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original config file.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model's state dict
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            """
    Classification (ImageNet-1k)
        - MobileViTV2 (256x256) : imagenet1k_256
        - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
        - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
          imagenet21k_to_1k_256
        - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
          ImageNet-1k 384x384) : imagenet21k_to_1k_384
    Segmentation
        - ADE20K Dataset : ade20k_deeplabv3
        - Pascal VOC 2012 Dataset: voc_deeplabv3
    """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 167 | 0 |
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of ``array``.

    >>> longest_subsequence([3, 1, 2])
    [1, 2]
    >>> longest_subsequence([])
    []
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of the recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            # candidate subsequence that skips the pivot and starts at array[i]
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # candidate subsequence that keeps the pivot as its first element
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 598 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # sentencepiece is optional: without it there is no slow tokenizer to
    # fall back to / convert from.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class __snake_case ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) XLNet tokenizer based on a SentencePiece vocabulary.

    NOTE(review): attribute/method names were mangled in this file; they are
    restored here to the ``PreTrainedTokenizerFast`` overrides their bodies
    implement.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # XLNet is padded on the left by convention.
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Make the mask token behave like a normal word (strip the space
        # before it). TODO confirm lstrip/rstrip against the slow tokenizer.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created if the sentencepiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """Build model inputs: ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``
        (XLNet places the CLS token at the end)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a_a + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """Create token type ids; the trailing CLS token gets segment id 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_a_a is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece vocab file into ``save_directory``.

        Raises:
            ValueError: if this tokenizer was built without a vocab file.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 598 | 1 |
from __future__ import annotations
def A(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Brute-force a Caesar cipher, ranking every shift with a chi-squared test
    against expected letter frequencies.

    Args:
        ciphertext: the encrypted message.
        cipher_alphabet: alphabet used by the cipher (defaults to a-z).
        frequencies_dict: expected letter frequencies (defaults to English).
        case_sensitive: preserve letter case while decrypting.

    Returns:
        ``(best_shift, chi_squared_value_of_best_shift, decoded_message)``
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values, keyed by shift
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear
                    # based on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __UpperCamelCase ( DiffusionPipeline ):
    """Pipeline for unconditional audio generation with a 1-D UNet.

    Parameters:
        unet: the UNet model used to denoise the audio sample.
        scheduler: the noise scheduler used together with ``unet``.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=100,
        generator=None,
        audio_length_in_s=None,
        return_dict=True,
    ):
        """Generate ``batch_size`` audio clips of ``audio_length_in_s`` seconds.

        Returns:
            ``AudioPipelineOutput`` with a numpy array of shape
            (batch, channels, samples), or a 1-tuple if ``return_dict`` is False.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet halves the resolution once per up block, so the sample
        # length must be a multiple of this factor.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}."""
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple; the output is cut back afterwards.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ' process.'
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        # trim the padding introduced by the multiple-of-down_scale_factor rounding
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
| 259 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an ``UperNetConfig`` (Swin backbone + ADE20k labels) for ``model_name``.

    Args:
        model_name: one of ``upernet-swin-{tiny,small,base,large}``.
    """
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information (ADE20k: 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    """Build (old, new) key pairs mapping mmsegmentation Swin+UperNet checkpoint
    names to the HuggingFace naming scheme.

    Args:
        config: the ``UperNetConfig`` whose ``backbone_config.depths`` drives the
            per-stage/per-block key generation.
    """
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        # the last stage has no downsampling layer
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` in ``dct`` to key ``new`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    """Split fused qkv projection matrices into separate q/k/v entries (in place).

    Args:
        state_dict: checkpoint state dict containing ``...attn.w_msa.qkv.*`` keys.
        backbone_config: Swin config providing ``embed_dim`` and ``depths``.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    """Permute the input-channel grouping of a 2-D patch-merging reduction weight
    from (0, 2, 1, 3) block order to sequential order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    """Inverse of ``correct_unfold_reduction_order`` for a 2-D reduction weight:
    regroup input channels back into (0, 2, 1, 3) block order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    """Permute a 1-D patch-merging norm parameter from (0, 2, 1, 3) block order
    to sequential order."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    """Inverse of ``correct_unfold_norm_order`` for a 1-D norm parameter:
    regroup channels back into (0, 2, 1, 3) block order."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation Swin+UperNet checkpoint to the HuggingFace format,
    verify its logits on a fixture image, save, and optionally push to the hub.

    Args:
        model_name: one of ``upernet-swin-{tiny,small,base,large}``.
        pytorch_dump_folder_path: output directory, or ``None`` to skip saving.
        push_to_hub: whether to push model and processor to ``openmmlab/<name>``.
    """
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters: HF stores them in the original (pre-unfold) order
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 718 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Base import structure: the configuration is always importable.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 547 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Stub standing in for PIL.Image when vision deps are missing, so the
        tests below can still be collected (they are skipped by @require_vision)."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    """Tests for the zero-shot image classification pipeline (PT and TF backends)."""

    @require_torch
    def test_small_model_pt(self):
        """Tiny random CLIP checkpoint, PyTorch backend."""
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # Scores are nearly tied, so the label order is unstable — check labels with ANY(str).
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        """Tiny random CLIP checkpoint, TensorFlow backend."""
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # Scores are nearly tied, so the label order is unstable — check labels with ANY(str).
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        """Full CLIP ViT-B/32 checkpoint, PyTorch backend."""
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        """Full CLIP ViT-B/32 checkpoint, TensorFlow backend."""
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 77 | from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps canonical checkpoint names to their hosted config URLs.
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """Configuration for a PoolFormer model.

    Stores the hyper-parameters of the hierarchical (4-stage) PoolFormer encoder:
    overall patch embedding (patch_size/stride/padding), per-stage depths, hidden
    sizes, patch sizes, strides and paddings, the pooling window size and the MLP
    expansion ratio. Unknown kwargs are forwarded to ``PretrainedConfig``.
    """

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for PoolFormer."""

    # Minimum torch version known to export this architecture correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input in NCHW layout; all four axes are dynamic.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model outputs.
        return 2e-3
| 537 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig for the given checkpoint name.

    Sets the architecture sizes from the name (small/base/large/huge) and, for
    fine-tuned checkpoints, loads the matching id2label mapping from the hub.
    """
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # Pretraining checkpoints use the [CLS]-style head without mean pooling.
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on `config` based on the size tag in `model_name`.

    Recognizes "small", "large" and "huge"; "base" keeps the config defaults.
    Raises ValueError when none of the four size tags is present.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """Translate an original VideoMAE state-dict key to its HF equivalent.

    Applies an ordered series of substring replacements; order matters (e.g.
    "decoder.blocks" must be handled before the generic "blocks" rule).
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    """Remap an original VideoMAE state dict to HF naming, in place.

    Fused "qkv" projection weights are split into separate query/key/value
    tensors (sized by the encoder or decoder hidden size); every other key is
    renamed via `rename_key`.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    # Fused qkv rows are laid out as [query; key; value].
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    """Download the spaghetti-eating test clip and return it as a list of frames."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint (hosted on Google Drive) to HF format.

    Downloads the checkpoint, remaps its state dict, verifies logits (and the
    reconstruction loss for "videomae-base-short") against known values, then
    optionally saves locally and/or pushes the model to the hub.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        # Pretraining checkpoints need a boolean mask over patches.
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 73 |
def temp_input_value(min_val=10, max_val=1000, option=True):
    """Return `min_val` when `option` is truthy, otherwise `max_val`.

    Raises:
        AssertionError: when the arguments have the wrong types.
        ValueError: when min_val > max_val.
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1, number_2):
    """Return the midpoint of two numbers, truncated to an int."""
    return int((number_1 + number_2) / 2)
def guess_the_number(lower, higher, to_guess):
    """Binary-search for `to_guess` inside (lower, higher), printing each probe.

    Raises:
        AssertionError: when any argument is not an int.
        ValueError: when lower > higher, or to_guess lies outside (lower, higher).
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        """Compare a probe against the hidden target."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            # Probe undershot: the target is above it.
            last_lowest = number
        elif answer(number) == "high":
            # Probe overshot: the target is below it.
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main():
    """Read the bounds and the target from stdin, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 73 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests


open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 15 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase(metaclass=DummyObject):
    """Placeholder object raising an informative ImportError when torch/scipy
    are missing; every entry point just triggers the backend check."""

    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
# Help text shown for the `accelerate config default` subcommand.
_UpperCamelCase : str = """Create a default config file for Accelerate with only a few flags set."""
def write_basic_config(mixed_precision="no", save_location=str(default_json_config_file), use_xpu=False):
    """Write a minimal Accelerate cluster config file.

    Picks the distributed type and process count from the detected hardware
    (CUDA GPUs, XPUs when `use_xpu` is set, NPUs, else CPU).

    Returns:
        The path the config was written to, or False when a config already
        exists at `save_location` (never overwrites).
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    """Register the `default` subcommand and its arguments on `parser`."""
    # `_UpperCamelCase` is the module-level description string for this subcommand.
    parser = parser.add_parser("default", parents=parents, help=_UpperCamelCase, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    """Entry point for `accelerate config default`: write the config and report where."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 717 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    # Without sentencepiece there is no slow tokenizer to fall back to.
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's whitespace marker character.
SPIECE_UNDERLINE = "▁"
class _lowerCAmelCase( _a):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = AlbertTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase="[CLS]" , UpperCAmelCase="[SEP]" , UpperCAmelCase="<unk>" , UpperCAmelCase="[SEP]" , UpperCAmelCase="<pad>" , UpperCAmelCase="[CLS]" , UpperCAmelCase="[MASK]" , **UpperCAmelCase , )-> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__A = (
AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase , normalized=UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase )
else mask_token
)
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , remove_space=UpperCAmelCase , keep_accents=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , **UpperCAmelCase , )
__A = do_lower_case
__A = remove_space
__A = keep_accents
__A = vocab_file
__A = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase = None )-> List[int]:
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase = None )-> List[int]:
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase = None )-> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__A = os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
| 341 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
def __init__( self: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any]=12 ,__lowerCAmelCase: List[Any]=7 ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Union[str, Any]=True ,__lowerCAmelCase: Optional[Any]=True ,__lowerCAmelCase: str=99 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: List[Any]=2 ,__lowerCAmelCase: List[Any]=4 ,__lowerCAmelCase: Optional[Any]=37 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: List[Any]=0.1 ,__lowerCAmelCase: List[str]=512 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: int=0 ,__lowerCAmelCase: Optional[int]=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : str = seq_length
_lowerCamelCase : Any = is_training
_lowerCamelCase : List[str] = use_input_mask
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Any = projection_dim
_lowerCamelCase : Optional[Any] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : Dict = dropout
_lowerCamelCase : Union[str, Any] = attention_dropout
_lowerCamelCase : str = max_position_embeddings
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[Any] = scope
_lowerCamelCase : Optional[int] = bos_token_id
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCamelCase : List[str] = None
if self.use_input_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_lowerCamelCase : Optional[int] = input_mask.numpy()
_lowerCamelCase, _lowerCamelCase : str = input_mask.shape
_lowerCamelCase : Tuple = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(__lowerCAmelCase ):
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : int = 0
_lowerCamelCase : Dict = self.get_config()
return config, input_ids, tf.convert_to_tensor(__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : str = TFBlipTextModel(config=__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,training=__lowerCAmelCase )
_lowerCamelCase : List[str] = model(__lowerCAmelCase ,training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = config_and_inputs
_lowerCamelCase : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A_ ( _a , unittest.TestCase ):
lowerCAmelCase__ = (TFBlipTextModel,) if is_tf_available() else ()
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = BlipTextModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def _lowercase ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int = TFBlipTextModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: Dict=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=__lowerCAmelCase ) | 46 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Reorder the four 8-bit groups of a 32-char bit string into little-endian order.

    Fixes the original, which never bound the accumulator it appended to
    (NameError) and whose name did not match its call site.

    :param string_aa: exactly 32 ASCII ``0``/``1`` bytes (one 32-bit word).
    :return: the same bits with byte order reversed.
    :raises ValueError: if the input is not exactly 32 characters long.
    """
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")
    # Emit the four 8-char "bytes" last-first: indices 24..31, 16..23, 8..15, 0..7.
    return string_aa[24:32] + string_aa[16:24] + string_aa[8:16] + string_aa[0:8]
def reformat_hex(i: int) -> bytes:
    """Return the little-endian 8-char hex representation of the low 32 bits of *i*.

    :param i: non-negative integer (only the low 32 bits are used).
    :return: 8 ASCII hex bytes, byte-pairs in reversed (little-endian) order.
    :raises ValueError: if *i* is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    # Keep only the low 32 bits, zero-padded to 8 hex digits.
    hex_rep = format(i, "08x")[-8:]
    # Emit the four hex byte-pairs last-first.
    return b"".join(hex_rep[2 * k : 2 * k + 2].encode("utf-8") for k in (3, 2, 1, 0))
def preprocess(message: bytes) -> bytes:
    """Pad *message* into an MD5 bit string: data bits + ``1`` + zeros + 64-bit length.

    Fixes the original, which formatted the whole message object instead of each
    byte (TypeError) and never bound its accumulator.  The 64-bit message length
    is appended as two little-endian 32-bit words, low word first (RFC 1321).

    :param message: arbitrary bytes to hash.
    :return: ASCII ``0``/``1`` bytes whose length is a multiple of 512.
    """
    # One "08b" group per input byte.
    bit_string = b"".join(format(char, "08b").encode("utf-8") for char in message)
    # Length in bits, captured BEFORE any padding is appended.
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad with a single 1-bit then 0-bits up to 448 mod 512.
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    # Append the length as two byte-reversed 32-bit words (low half first).
    for word in (start_len[32:], start_len[:32]):
        bit_string += word[24:32] + word[16:24] + word[8:16] + word[0:8]
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block of *bit_string* as sixteen little-endian 32-bit ints.

    Fixes the original, whose per-block locals were never bound (NameError).

    :param bit_string: ASCII ``0``/``1`` bytes, length a multiple of 512.
    :raises ValueError: if the length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        # Each 32-char chunk is byte-reversed before conversion (little-endian words).
        yield [
            int(block[i + 24 : i + 32] + block[i + 16 : i + 24] + block[i + 8 : i + 16] + block[i : i + 8], 2)
            for i in range(0, 512, 32)
        ]
def not_aa(i: int) -> int:
    """Return the bitwise complement of *i* over its 32-bit binary representation.

    Fixes the original, which converted the *input integer* with ``int(..., 2)``
    (TypeError) instead of the flipped bit string.

    :param i: non-negative integer.
    :raises ValueError: if *i* is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    # Flip every bit of the (at least) 32-char representation.
    new_str = "".join("1" if c == "0" else "0" for c in i_str)
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Add two integers modulo 2**32 (32-bit wrap-around addition)."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value *i* left by *shift* bits.

    :param i: non-negative integer (interpreted modulo 2**32).
    :param shift: non-negative rotation amount, expected in ``0..31``.
    :raises ValueError: if either argument is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    # Bits shifted past bit 31 re-enter on the right via the XOR'd right shift.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of *message* as 32 lowercase-hex ASCII bytes (RFC 1321).

    Fixes the original, whose locals were all bound to a single throwaway name
    (NameError on first use) and whose fourth-round mixing function complemented
    the *message* argument instead of state word ``d``.

    :param message: bytes to hash.
    :return: hex digest, e.g. ``b"d41d8cd98f00b204e9800998ecf8427e"`` for ``b""``.
    """
    bit_string = preprocess(message)

    # Per-round additive constants: floor(2**32 * |sin(i + 1)|)  (RFC 1321, T table).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Initial state words (RFC 1321 section 3.3).
    a0 = 0x6745_2301
    b0 = 0xEFCD_AB89
    c0 = 0x98BA_DCFE
    d0 = 0x1032_5476

    # Left-rotation amounts: one group of four values per 16-round stage.
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )

    # Process the bit string in 512-bit chunks of 16 little-endian 32-bit words.
    for block_words in get_block_words(bit_string):
        a, b, c, d = a0, b0, c0, d0

        # Hash the current chunk over 64 rounds.
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)  # alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)  # alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                # Round 4: I(b, c, d) = c xor (b or not d).
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            # Rotate the state: old b feeds the new b via the round output.
            rotated = left_rotate_aa(f, shift_amounts[i])
            a, d, c, b = d, c, b, sum_aa(b, rotated)

        # Fold the hashed chunk back into the running totals.
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    return reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 157 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCamelCase__ ( TaskTemplate ):
    """Task template for image classification.

    Restores the upstream ``datasets`` implementation: the original block bound
    every attribute and local to a single obfuscated name, so the methods
    referenced undefined variables (``label_schema``, ``task_template``) and the
    second definition shadowed the first.
    """

    # `task` is serialized even when left at its default value.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        :raises ValueError: if the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names onto the canonical 'image'/'labels' names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 602 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger.  NOTE(review): bound to `__magic_name__` and never used
# below — presumably meant to be named `logger`; confirm against the caller.
__magic_name__ : List[Any] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """Build a ``MobileNetVaConfig`` for *model_name*.

    The depth multiplier and image size are parsed from names of the form
    ``mobilenet_v1_<depth>_<size>``.  Fixes the original, whose locals were
    all bound to one name (``idalabel`` was undefined) and whose label-id
    comprehension converted the wrong variable.

    :param model_name: e.g. ``"mobilenet_v1_1.0_224"``.
    :raises ValueError: for quantized checkpoints, which are unsupported.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Shift every ImageNet id up by one to make room for the background class.
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def prepare_img():
    """Download and return the standard COCO test image (two cats on a couch).

    Used only to sanity-check the converted model's logits.  Fixes the
    original, which passed an undefined name as ``stream`` and whose
    definition name collided with the other ``A__`` functions.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True avoids buffering the whole response before PIL reads it.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A__ ( A_ , A_ , A_ , A_=False ) -> List[Any]:
_lowercase = get_mobilenet_va_config(A_ )
# Load 🤗 model
_lowercase = MobileNetVaForImageClassification(A_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(A_ , A_ , A_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
_lowercase = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
_lowercase = image_processor(images=prepare_img() , return_tensors="pt" )
_lowercase = model(**A_ )
_lowercase = outputs.logits
assert logits.shape == (1, 1_001)
if model_name == "mobilenet_v1_1.0_224":
_lowercase = torch.tensor([-4.1739, -1.1233, 3.1205] )
elif model_name == "mobilenet_v1_0.75_192":
_lowercase = torch.tensor([-3.9440, -2.3141, -0.3333] )
else:
_lowercase = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , A_ , atol=1e-4 )
Path(A_ ).mkdir(exist_ok=A_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A_ )
if push_to_hub:
print("Pushing to the hub..." )
_lowercase = "google/" + model_name
image_processor.push_to_hub(A_ )
model.push_to_hub(A_ )
if __name__ == "__main__":
    # CLI entry point.  Fixes the original, which bound the parser and the
    # parsed args to `__magic_name__` and then referenced the undefined names
    # `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''mobilenet_v1_1.0_224''',
        type=str,
        help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 602 | 1 |
import os
def largest_product(grid):
    """Return the greatest product of four adjacent numbers in *grid*.

    Adjacency is along a row, a column, or either diagonal.  Generalized from
    the original hard-coded 20x20 loops: works for any rectangular grid of
    non-negative integers; runs of four that do not fit are skipped.

    :param grid: list of equal-length rows of non-negative ints.
    :return: the maximum product found, or 0 if no run of four fits.
    """
    n_rows = len(grid)
    n_cols = len(grid[0]) if grid else 0
    maximum = 0
    for i in range(n_rows):
        for j in range(n_cols):
            if j + 3 < n_cols:  # right
                maximum = max(maximum, grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3])
            if i + 3 < n_rows:  # down
                maximum = max(maximum, grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j])
            if i + 3 < n_rows and j + 3 < n_cols:  # diagonal down-right
                maximum = max(maximum, grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3])
            if i + 3 < n_rows and j >= 3:  # diagonal down-left
                maximum = max(maximum, grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3])
    return maximum


def solution():
    """Project Euler 11: greatest product of four adjacent numbers in the grid.

    Reads the 20x20 grid from ``grid.txt`` next to this file.  Fixes two
    defects in the original: the directory lookup used an undefined name where
    ``__file__`` was intended, and the function's name did not match the
    ``solution()`` call in the ``__main__`` guard.
    """
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = [[int(x) for x in f.readline().split()] for _ in range(20)]
    return largest_product(grid)
if __name__ == "__main__":
    # Print the answer for Project Euler problem 11.
    # NOTE(review): `solution` must be defined above — the function in this file
    # is currently named `__snake_case`, so as written this call raises
    # NameError; confirm the intended name.
    print(solution())
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazily-loaded import structure: maps submodule name -> public names it defines.
# Fixes the original, which (a) registered the modeling names into a throwaway
# variable instead of the structure, and (b) built the lazy module without
# installing it in sys.modules under the structure's actual name.
_import_structure = {
    '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: the modeling submodule is simply not registered.
    pass
else:
    _import_structure["modeling_ernie"] = [
        '''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ErnieForCausalLM''',
        '''ErnieForMaskedLM''',
        '''ErnieForMultipleChoice''',
        '''ErnieForNextSentencePrediction''',
        '''ErnieForPreTraining''',
        '''ErnieForQuestionAnswering''',
        '''ErnieForSequenceClassification''',
        '''ErnieForTokenClassification''',
        '''ErnieModel''',
        '''ErniePreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowercase__ ( TFGenerationMixin ):
    """Backward-compatibility shim for the old `generation_tf_utils` import path.

    Fixes the original, which inherited from the undefined name
    ``SCREAMING_SNAKE_CASE`` and passed the same undefined name as the warning
    category; the base is the real ``TFGenerationMixin`` imported above and the
    category is ``FutureWarning``.
    """

    # Emitted once, at import time, when this class body executes.
    warnings.warn(
        '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
        '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''',
        FutureWarning,
    )
| 14 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the small SentencePiece model checked into the test fixtures.
# Fixes the original, which bound all three constants to the same name
# (`lowerCamelCase`) while the tests below reference EN_CODE / PYTHON_CODE.
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

# Fairseq-offset ids of the "__en_XX__" and "__python__" language codes
# in the PLBart "base" vocabulary.
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Unit tests for the slow, SentencePiece-based ``PLBartTokenizer``.

    NOTE(review): several identifiers in this class are not defined anywhere in
    this module and will raise NameError at runtime: ``SCREAMING_SNAKE_CASE``
    (presumably ``TokenizerTesterMixin``, imported above), ``_UpperCAmelCase``
    (stands in for the fixture path, boolean flags and intermediate results),
    and ``tokenizer`` in ``setUp``.  Confirm against the upstream test file
    before relying on these tests.
    """

    UpperCamelCase = PLBartTokenizer
    UpperCamelCase = None
    UpperCamelCase = False
    def lowercase__ ( self : str ) -> List[Any]:
        '''Create a tokenizer from the SentencePiece fixture and save it for the mixin tests.'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
        '''Tokenize / id-convert / decode round-trip checks for the "base" (two-language-code) vocab.'''
        UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="base" , keep_accents=_UpperCAmelCase )
        UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            _UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
        self.assertListEqual(
            _UpperCAmelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
        # Out-of-vocab pieces come back as <unk> on the reverse mapping.
        self.assertListEqual(
            _UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        UpperCAmelCase_ = tokenizer.vocab_size
        # The last four ids of the "base" vocab are the language codes + mask.
        UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 4 , _UpperCAmelCase )]
        self.assertListEqual(_UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
        UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
        self.assertEqual(
            tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
    def lowercase__ ( self : Optional[Any] ) -> int:
        '''Same round-trip checks for the "multi" (seven-language-code) vocab.'''
        UpperCAmelCase_ = PLBartTokenizer(_UpperCAmelCase , language_codes="multi" , keep_accents=_UpperCAmelCase )
        UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(_UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            _UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
        self.assertListEqual(
            _UpperCAmelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
        self.assertListEqual(
            _UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
        UpperCAmelCase_ = tokenizer.vocab_size
        # The last seven ids of the "multi" vocab are all seven language codes.
        UpperCAmelCase_ = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 7 , _UpperCAmelCase )]
        self.assertListEqual(
            _UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
        UpperCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        UpperCAmelCase_ = tokenizer(_UpperCAmelCase ).input_ids
        self.assertEqual(
            tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
    """Integration tests against the ``uclanlp/plbart-python-en_XX`` checkpoint.

    NOTE(review): ``_UpperCAmelCase`` is referenced throughout but never
    defined in this module; judging from the assertions it stands in for
    booleans, expected id lists and temp-dir paths.  Likewise the class
    attributes are all bound to the single name ``UpperCamelCase`` while the
    methods read ``checkpoint_name`` / ``src_text`` / ``tgt_text`` /
    ``expected_src_tokens``.  Confirm against the upstream test file.
    """

    UpperCamelCase = '''uclanlp/plbart-python-en_XX'''
    UpperCamelCase = [
        '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
        '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
    ]
    UpperCamelCase = [
        '''Returns the maximum value of a b c.''',
        '''Sums the values of a b c.''',
    ]
    # Expected source token ids for the first snippet, ending in EOS + PYTHON_CODE.
    UpperCamelCase = [
        1_34,
        54_52,
        3_34_60,
        3_34_41,
        3_34_63,
        3_34_65,
        3_34_63,
        3_34_49,
        9_88,
        20,
        3_34_56,
        19,
        3_34_56,
        7_71,
        39,
        42_58,
        8_89,
        33_18,
        3_34_41,
        3_34_63,
        3_34_65,
        3_34_63,
        3_34_49,
        24_71,
        2,
        PYTHON_CODE,
    ]
    @classmethod
    def lowercase__ ( cls : int ) -> Dict:
        '''Load the python->en_XX tokenizer once for the whole test class.'''
        UpperCAmelCase_ = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
        UpperCAmelCase_ = 1
        return cls
    def lowercase__ ( self : List[Any] ) -> Optional[Any]:
        '''The three language codes map to their fixed fairseq-offset ids.'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
    def lowercase__ ( self : int ) -> Union[str, Any]:
        '''Batch-encoding the source text yields the expected token ids.'''
        UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
    def lowercase__ ( self : List[str] ) -> Union[str, Any]:
        '''Decoding with skip_special_tokens drops the leading language code and EOS.'''
        self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
        UpperCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        UpperCAmelCase_ = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
        UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
    def lowercase__ ( self : int ) -> int:
        '''Truncation to max_length keeps EOS (id 2) + language code at the end.'''
        UpperCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0] , _UpperCAmelCase )
        UpperCAmelCase_ = 10
        UpperCAmelCase_ = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , _UpperCAmelCase )
        self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
    def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
        '''The mask token and the java language code have the expected ids.'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
    def lowercase__ ( self : Optional[Any] ) -> List[str]:
        '''Saving and reloading the tokenizer preserves the fairseq token-id table.'''
        UpperCAmelCase_ = tempfile.mkdtemp()
        UpperCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(_UpperCAmelCase )
        UpperCAmelCase_ = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
    @require_torch
    def lowercase__ ( self : Optional[int] ) -> List[Any]:
        '''shift_tokens_right produces decoder inputs aligned with the labels.'''
        UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors="pt" )
        UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
    @require_torch
    def lowercase__ ( self : int ) -> str:
        '''Full target-mode batch has the expected shapes and special-token layout.'''
        UpperCAmelCase_ = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        UpperCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        UpperCAmelCase_ = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def lowercase__ ( self : List[Any] ) -> List[Any]:
        '''Source and target truncation lengths are applied independently.'''
        UpperCAmelCase_ = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt" )
        UpperCAmelCase_ = self.tokenizer(
            text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt" )
        UpperCAmelCase_ = targets["input_ids"]
        UpperCAmelCase_ = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def lowercase__ ( self : Tuple ) -> List[str]:
        '''_build_translation_inputs appends EOS + source code and forces the target code.'''
        UpperCAmelCase_ = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            } , )
| 14 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to placeholder objects that raise a helpful error on use.  The
    # wildcard pulls in *all* dummy pipelines; the original imported only two of
    # the five public names, so the fallback path raised ImportError downstream.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa: F401,F403
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 445 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __lowerCamelCase ( UpperCAmelCase_ : int = 8 ):
    """Return a random password of *UpperCAmelCase_* characters drawn from
    letters, digits and punctuation (cryptographically secure via `secrets`)."""
    alphabet = ascii_letters + digits + punctuation
    picks = [secrets.choice(alphabet) for _ in range(UpperCAmelCase_)]
    return "".join(picks)
def __lowerCamelCase ( chars_incl : str , i : int ):
    """Build a password of total length *i* that is guaranteed to contain every
    character of *chars_incl*, padded with random letters/digits/punctuation
    and then shuffled.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError) and called an undefined helper ``random``; a local secure
    chooser is used instead.
    """
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    password = (
        chars_incl
        + _pick_random_chars(ascii_letters, quotient + remainder)
        + _pick_random_chars(digits, quotient)
        + _pick_random_chars(punctuation, quotient)
    )
    chars = list(password)
    shuffle(chars)
    return "".join(chars)


def _pick_random_chars(pool: str, count: int) -> str:
    # Secure replacement for the undefined ``random`` helper the original called.
    return "".join(secrets.choice(pool) for _ in range(count))
# random is a generalised function for letters, characters and numbers
def __lowerCamelCase ( chars : str , length : int ):
    """Return *length* characters drawn uniformly (and securely) from *chars*.

    Fixed: the original declared both parameters with the same name, which is
    a SyntaxError in Python.
    """
    return "".join(secrets.choice(chars) for _ in range(length))
def __lowerCamelCase ( ctbi , i ):
    """Placeholder: random-letters helper (unimplemented).

    Fixed: duplicate parameter names (SyntaxError) and annotations referencing
    names not imported in this script.
    """
    pass  # Put your code here...


def __lowerCamelCase ( ctbi , i ):
    """Placeholder: random-number helper (unimplemented)."""
    pass  # Put your code here...


def __lowerCamelCase ( ctbi , i ):
    """Placeholder: random-characters helper (unimplemented)."""
    pass  # Put your code here...
def __lowerCamelCase ( password : str , min_length : int = 8 ):
    """Return True when *password* is at least *min_length* characters long and
    mixes uppercase, lowercase, digit and punctuation characters.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError).
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def __lowerCamelCase ( ):
    """simple docstring"""
    # Interactive driver: asks for a max length and for required characters,
    # then prints two generated passwords.
    a :int = int(input('''Please indicate the max length of your password: ''' ).strip() )
    a :Union[str, Any] = input(
        '''Please indicate the characters that must be in your password: ''' ).strip()
    # NOTE(review): `password_generator` / `alternative_password_generator` are
    # not defined under those names in this file (every helper here is named
    # `__lowerCamelCase`), and `UpperCAmelCase_` is not a local of this
    # function — as written these calls raise NameError. TODO: confirm the
    # intended helper names and pass the locals `a` read above.
    print('''Password generated:''' , password_generator(UpperCAmelCase_ ) )
    print(
        '''Alternative Password generated:''' , alternative_password_generator(UpperCAmelCase_ , UpperCAmelCase_ ) , )
    print('''[If you are thinking of using this passsword, You better save it.]''' )


if __name__ == "__main__":
    # NOTE(review): `main` is undefined at module level (the driver above is
    # named `__lowerCamelCase`) — running this script raises NameError.
    main()
| 445 | 1 |
import operator as op

# NOTE(review): every constant below is bound to the same name `__A`, so each
# assignment shadows the previous one — presumably the originals had distinct
# names (SCALER_NAME, MODEL_NAME, ...); confirm against the source project.

# Checkpoint artifact file names used when saving/loading training state.
__A : List[str] = 'scaler.pt'
__A : int = 'pytorch_model'
__A : Optional[int] = 'random_states'
__A : int = 'optimizer'
__A : Union[str, Any] = 'scheduler'
__A : List[Any] = 'pytorch_model.bin'
__A : Tuple = 'pytorch_model.bin.index.json'
__A : Any = 'model.safetensors'
__A : Any = 'model.safetensors.index.json'

# Minimum supported versions and environment identifiers.
__A : List[Any] = '1.10.2'
__A : List[Any] = 'py38'
__A : str = '4.17.0'
__A : Optional[Any] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']

# FSDP sharding strategies, auto-wrap policies, prefetch modes and
# state-dict types (option-name string lists).
__A : List[Any] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
__A : Dict = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
__A : Optional[int] = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
__A : Union[str, Any] = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
__A : int = '2.0.1'
__A : int = ['pdsh', 'standard', 'openmpi', 'mvapich']
__A : List[Any] = ['default', 'reduce-overhead', 'max-autotune']

# Comparison-operator strings mapped to their `operator` functions.
__A : str = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__A : Optional[Any] = [
    'nnodes',
    'nproc_per_node',
    'rdzv_backend',
    'rdzv_endpoint',
    'rdzv_id',
    'rdzv_conf',
    'standalone',
    'max_restarts',
    'monitor_interval',
    'start_method',
    'role',
    'module',
    'm',
    'no_python',
    'run_path',
    'log_dir',
    'r',
    'redirects',
    't',
    'tee',
    'node_rank',
    'master_addr',
    'master_port',
]
# Distributed-launcher types per accelerator family.
__A : List[Any] = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
__A : List[str] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 75 |
from collections import namedtuple
import requests
from lxml import html  # type: ignore
# Lightweight record for the three scraped counters (cases/deaths/recovered).
__A : Dict = namedtuple('covid_data', 'cases deaths recovered')
def __UpperCamelCase ( _A : str = "https://www.worldometers.info/coronavirus/" ) ->covid_data:
    """Scrape worldwide COVID-19 cases/deaths/recovered counters from *_A*.

    Fixed: the original stored the XPath expression in a throwaway local and
    then passed the URL itself to ``.xpath(...)``; the expression is now used.
    """
    xpath_str = """//div[@class = \"maincounter-number\"]/span/text()"""
    return covid_data(*html.fromstring(requests.get(_A ).content ).xpath(xpath_str ) )
# Report template: filled with the three scraped counters.
__A : Union[str, Any] = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
# NOTE(review): both `fmt` and `covid_stats` are undefined under those names
# (the template above is bound to `__A`, the scraper is `__UpperCamelCase`) —
# this module-level print raises NameError on import. TODO: confirm names.
print(fmt.format(*covid_stats()))
| 75 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( ):
    """Return True when SageMaker model parallelism is configured and usable.

    Reads the SageMaker hyper-parameter / framework env vars and finally
    checks that the ``smdistributed`` module is importable.

    Fixed: the original collapsed all locals into one name and passed the
    undefined ``UpperCamelCase__`` to ``json.loads`` / ``dict.get``.
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
# Initialise SageMaker model parallelism once, at import time, when available.
# NOTE(review): `is_sagemaker_model_parallel_available` is not defined under
# that name in this file (the check above is named `SCREAMING_SNAKE_CASE__`) —
# confirm the intended reference.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
    # TrainingArguments variant used inside SageMaker (deprecated shim).
    # NOTE(review): the base name `UpperCamelCase__` is undefined in this file —
    # presumably it stands for the imported `TrainingArguments`; confirm.
    # NOTE(review): the field below is bound to `lowerCamelCase_` and carries no
    # annotation, so `@dataclass` will not treat it as a field as written.
    lowerCamelCase_ = field(
        default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )

    def _snake_case ( self :Any ) -> Optional[int]:
        """Post-init hook: runs the parent's post-init and emits a deprecation
        warning (this class is slated for removal in v5)."""
        super().__post_init__()
        # NOTE(review): `__A` is undefined here — presumably the warning
        # category (e.g. FutureWarning) in the original; confirm.
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" , __A , )

    @cached_property
    def _snake_case ( self :List[Any] ) -> "torch.device":
        """Select the torch device / distributed backend for this process.

        NOTE(review): every local below is rebound to `SCREAMING_SNAKE_CASE__`;
        the original presumably assigned `device` and `self._n_gpu`, which the
        tail of this method reads — as written those names are undefined.
        """
        logger.info("""PyTorch: setting up devices""" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                """torch.distributed process group is initialized, but local_rank == -1. """
                """In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
        if self.no_cuda:
            SCREAMING_SNAKE_CASE__ = torch.device("""cpu""" )
            SCREAMING_SNAKE_CASE__ = 0
        elif is_sagemaker_model_parallel_available():
            SCREAMING_SNAKE_CASE__ = smp.local_rank()
            SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" , __A )
            SCREAMING_SNAKE_CASE__ = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
            SCREAMING_SNAKE_CASE__ = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
            SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" , self.local_rank )
            SCREAMING_SNAKE_CASE__ = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            SCREAMING_SNAKE_CASE__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            SCREAMING_SNAKE_CASE__ = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
            SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" , self.local_rank )
            SCREAMING_SNAKE_CASE__ = 1

        if device.type == "cuda":
            torch.cuda.set_device(__A )

        return device

    @property
    def _snake_case ( self :Tuple ) -> int:
        """World size: model-parallel dp size when available, else the parent's."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def _snake_case ( self :int ) -> int:
        """Whether examples should be placed on device (not under model parallel)."""
        return not is_sagemaker_model_parallel_available()

    @property
    def _snake_case ( self :Any ) -> Any:
        """simple docstring"""
        return False
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
    # Composite configuration holding one encoder and one decoder sub-config.
    # NOTE(review): both class attributes below bind the same name
    # `lowerCamelCase_` (second shadows the first) — presumably the originals
    # were `model_type` and `is_composition`; confirm.
    lowerCamelCase_ = "encoder-decoder"
    lowerCamelCase_ = True

    def __init__( self :Optional[int] , **__A :str ) -> int:
        """Build the composite config from `encoder`/`decoder` entries in kwargs.

        NOTE(review): the keyword dict is the parameter `__A`, but the body
        reads the undefined names `kwargs`, `encoder_config` and
        `decoder_config`, and every popped value is rebound to the single local
        `SCREAMING_SNAKE_CASE__` — as written this raises NameError. The
        intended flow is: pop encoder/decoder dicts, pop their model_type, and
        rebuild each via AutoConfig.for_model.
        """
        super().__init__(**__A )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        SCREAMING_SNAKE_CASE__ = kwargs.pop("""encoder""" )
        SCREAMING_SNAKE_CASE__ = encoder_config.pop("""model_type""" )
        SCREAMING_SNAKE_CASE__ = kwargs.pop("""decoder""" )
        SCREAMING_SNAKE_CASE__ = decoder_config.pop("""model_type""" )

        from ..auto.configuration_auto import AutoConfig

        SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
        SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
        SCREAMING_SNAKE_CASE__ = True

    @classmethod
    def _snake_case ( cls :str , __A :PretrainedConfig , __A :PretrainedConfig , **__A :List[str] ) -> PretrainedConfig:
        """Alternate constructor from two already-built sub-configs.

        NOTE(review): duplicate parameter names (`__A` three times) are a
        SyntaxError; the flags set below also rebind one throwaway local
        instead of mutating the decoder config — confirm intended code.
        """
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        SCREAMING_SNAKE_CASE__ = True
        SCREAMING_SNAKE_CASE__ = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__A )

    def _snake_case ( self :str ) -> Union[str, Any]:
        """Serialise to a plain dict, expanding the nested sub-configs.

        NOTE(review): results are bound to one throwaway local and `output` is
        undefined at return time in the obfuscated form shown here.
        """
        SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE__ = self.encoder.to_dict()
        SCREAMING_SNAKE_CASE__ = self.decoder.to_dict()
        SCREAMING_SNAKE_CASE__ = self.__class__.model_type
        return output
"""simple docstring"""
from __future__ import annotations
def __lowercase ( lowerCamelCase_ : list[int] ):
    """Return True when every element of *lowerCamelCase_* is distinct."""
    distinct = set(lowerCamelCase_)
    return len(distinct) == len(lowerCamelCase_)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from math import sqrt
def __lowercase ( lowerCamelCase_ : int ):
    """Sum of the proper divisors of *lowerCamelCase_* (all divisors except
    the number itself), collected in pairs up to the square root."""
    root = sqrt(lowerCamelCase_)
    acc = 0
    for cand in range(1, int(root + 1)):
        if lowerCamelCase_ % cand != 0:
            continue
        if cand == root:
            # perfect square: count the root only once
            acc += cand
        else:
            acc += cand + lowerCamelCase_ // cand
    return acc - lowerCamelCase_
def __lowercase ( lowerCamelCase_ : int = 10000 ):
    """Project Euler 21: sum of all amicable numbers below *lowerCamelCase_*.

    Fixed: the original called the undefined name ``sum_of_divisors`` (the
    divisor helper in this file is also named ``__lowercase`` and is shadowed
    by this very definition), so a self-contained proper-divisor sum is used.
    """

    def _proper_divisor_sum(n: int) -> int:
        # Sum of proper divisors of n, pairing divisors up to sqrt(n).
        total = 0
        for i in range(1, int(sqrt(n) + 1)):
            if n % i == 0 and i != sqrt(n):
                total += i + n // i
            elif i == sqrt(n):
                total += i
        return total - n

    return sum(
        i
        for i in range(1, lowerCamelCase_)
        if _proper_divisor_sum(_proper_divisor_sum(i)) == i
        and _proper_divisor_sum(i) != i
    )


if __name__ == "__main__":
    # Fixed: `solution` was undefined — the function above is `__lowercase`.
    print(__lowercase(int(str(input()).strip())))
# Conversion factors INTO km/h, keyed by source unit.
a__ = {
    """km/h""": 1.0,
    """m/s""": 3.6,
    """mph""": 1.609_344,
    """knot""": 1.852,
}

# Conversion factors FROM km/h, keyed by target unit.
# NOTE(review): this rebinds `a__`, shadowing the table above; the converter
# below references `speed_chart` / `speed_chart_inverse`, which are not
# defined under those names — confirm the intended constant names.
a__ = {
    """km/h""": 1.0,
    """m/s""": 0.277_777_778,
    """mph""": 0.621_371_192,
    """knot""": 0.539_956_803,
}
def lowercase ( speed : float , unit_from : str , unit_to : str ) -> float:
    """Convert *speed* from *unit_from* to *unit_to*, rounded to 3 decimals.

    Raises ValueError for unknown units. Fixed: the original declared all
    three parameters with the same name (a SyntaxError) and referenced the
    module tables under names they were never bound to, so the unit tables
    are defined locally here.
    """
    # km/h is the pivot unit: factor into km/h, then factor out of km/h.
    speed_chart = {
        "km/h": 1.0,
        "m/s": 3.6,
        "mph": 1.609_344,
        "knot": 1.852,
    }
    speed_chart_inverse = {
        "km/h": 1.0,
        "m/s": 0.277_777_778,
        "mph": 0.621_371_192,
        "knot": 0.539_956_803,
    }
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            F'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
            F'''Valid values are: {', '.join(speed_chart_inverse )}'''
        )
        raise ValueError(msg )
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 477 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def UpperCAmelCase ( A : Union[str, Any] ):
    '''Pick a pivot: a uniformly random element of *A*.'''
    pivot = choice(A)
    return pivot
def UpperCAmelCase ( lst : list[int] , k : int ):
    """Return the k-th smallest element (1-based) of *lst* via quickselect.

    Fixed: the original declared both parameters as ``A`` (a SyntaxError) and
    recursed through the undefined names ``random_pivot`` / ``kth_number``.
    Note: like the original algorithm, elements equal to the pivot (other than
    the pivot itself) are discarded, so inputs should contain distinct values.
    """
    pivot = choice(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return UpperCAmelCase(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return UpperCAmelCase(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Installed torch version, parsed once at import time for cheap comparisons.
SCREAMING_SNAKE_CASE : Optional[Any] = parse(importlib.metadata.version("torch"))
def UpperCamelCase_( library_or_version , operation , requirement_version ):
    """Compare *library_or_version* (a package name or a parsed Version)
    against *requirement_version* using *operation* (e.g. '>', '>=', '==').

    Fixed: the original declared all three parameters with the same name (a
    SyntaxError) and referenced the undefined name ``a__``; restored per the
    accelerate ``compare_versions`` API.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    op_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        # A package name was given: look up and parse its installed version.
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return op_func(library_or_version , parse(requirement_version ) )
def UpperCamelCase_( operation , version ):
    """Compare the installed torch version (module constant above) against
    *version* using *operation* — mirrors accelerate's ``is_torch_version``.

    Fixed: the original declared both parameters with the same name (a
    SyntaxError) and called undefined helpers; implemented self-contained
    because the generic comparator shares this (obfuscated) name and is
    shadowed by this definition.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    # SCREAMING_SNAKE_CASE is the parsed installed-torch version defined above.
    return STR_OPERATION_TO_FUNC[operation](SCREAMING_SNAKE_CASE , parse(version ) )
| 716 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCamelCase( _a ):
    # CLIP processor: wraps a CLIPImageProcessor and a CLIPTokenizer(Fast)
    # into a single callable preprocessing front-end.
    # NOTE(review): the three class attributes all bind `lowercase_`, each
    # shadowing the previous — presumably `attributes`, `image_processor_class`
    # and `tokenizer_class` in the original; confirm.
    lowercase_ : int = ["""image_processor""", """tokenizer"""]
    lowercase_ : List[str] = """CLIPImageProcessor"""
    lowercase_ : Union[str, Any] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")

    # NOTE(review): duplicate parameter names (`lowerCamelCase` twice plus
    # `**lowerCamelCase`) are a SyntaxError; the originals were presumably
    # (image_processor=None, tokenizer=None, **kwargs).
    def __init__( self, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> Dict:
        """Accept the deprecated `feature_extractor` kwarg as an alias for
        `image_processor`, then validate both components and initialise."""
        _lowercase : List[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', lowerCamelCase, )
            _lowercase : Dict = kwargs.pop('feature_extractor')

        _lowercase : int = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(lowerCamelCase, lowerCamelCase)

    def __call__( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> Tuple:
        """Tokenise `text` and/or preprocess `images`; at least one required.

        NOTE(review): when both text and images are given, `pixel_values` is
        bound to a throwaway local instead of being attached to the encoding
        (original: `encoding["pixel_values"] = image_features.pixel_values`),
        and `encoding` itself is undefined under that name — confirm.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            _lowercase : Optional[Any] = self.tokenizer(lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase)

        if images is not None:
            _lowercase : List[Any] = self.image_processor(lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase)

        if text is not None and images is not None:
            _lowercase : int = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCamelCase), tensor_type=lowerCamelCase)

    def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Union[str, Any]:
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase)

    def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> int:
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase)

    @property
    def UpperCamelCase ( self) -> int:
        """Union of tokenizer and image-processor input names (deduplicated)."""
        _lowercase : int = self.tokenizer.model_input_names
        _lowercase : Dict = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def UpperCamelCase ( self) -> List[str]:
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowerCamelCase, )
        return self.image_processor_class

    @property
    def UpperCamelCase ( self) -> Optional[Any]:
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowerCamelCase, )
        return self.image_processor
| 354 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging

snake_case__ = logging.get_logger(__name__)
# Map of LUKE checkpoint names to their hosted config files.
# NOTE(review): this rebinds `snake_case__`, shadowing the logger above —
# the original presumably used distinct names; confirm.
snake_case__ = {
    '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
    '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class lowerCAmelCase_ ( lowerCamelCase__):
    """Configuration for a LUKE model: vocabulary/entity-vocabulary sizes,
    transformer dimensions, and entity-aware-attention options."""

    lowerCamelCase_ = 'luke'

    def __init__(
        self ,
        vocab_size=50267 ,
        entity_vocab_size=500000 ,
        hidden_size=768 ,
        entity_emb_size=256 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        use_entity_aware_attention=True ,
        classifier_dropout=None ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        **kwargs ,
    ):
        """Store every hyper-parameter on the instance.

        Fixed: the original signature declared every parameter as ``__A``
        (duplicate parameter names are a SyntaxError) and bound each value to
        the single local ``a__`` instead of an attribute; parameter names are
        restored from the per-line annotations/defaults and assignment order.
        """
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 395 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def A__ ( A_ ) -> None:
    """Delete fairseq bookkeeping keys from the state dict *A_* in place.

    Fixed: the original read the undefined name ``state_dict`` and popped the
    dict itself as a key; each ignore key is now removed with a ``None``
    default so missing keys are tolerated.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        A_.pop(k , None )
def A__ ( A_ ):
    """Build a bias-free ``nn.Linear`` that shares the weights of embedding
    *A_* (the standard trick for tying an LM head to input embeddings).

    Fixed: the original unpacked both shape dims into one throwaway local,
    passed the embedding itself as the Linear dims/bias, and returned the
    undefined name ``lin_layer``.
    """
    vocab_size, emb_size = A_.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share (not copy) the embedding weights with the linear head.
    lin_layer.weight.data = A_.weight.data
    return lin_layer
def A__ ( state_dict , expert_idx=None ):
    """Map fairseq NLLB-MoE parameter names to their HF equivalents.

    Returns a NEW dict. Fixed: the original declared both parameters with the
    same name (a SyntaxError), bound every renamed key/value to a throwaway
    local, and returned the undefined name ``new_dict``. The always-truthy
    ``if "fc2" and ...`` test is also spelled out (equivalent for replace).
    """
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" )
            else:
                key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm" , "ff_layer_norm" )
        new_dict[key] = state_dict[old_key]
    return new_dict
# Shard an NLLB-MoE fairseq checkpoint into per-expert weight files plus a
# weight-map index, without materialising the whole model in memory.
# NOTE(review): five parameters all named `A_` are a SyntaxError (originals
# presumably: switch_checkpoint_path, dump_path, num_experts, dtype,
# weights_name=WEIGHTS_NAME); every local is rebound to `_lowercase`, and the
# later reads of `sharded_state_dicts`, `expert_state`, `shared_weights`,
# `weights_name`, `metadata`, `weight_map` and `index` reference names that
# are never bound as written — confirm against the un-obfuscated script.
def A__ ( A_ , A_ , A_ , A_ , A_ = WEIGHTS_NAME ) -> Any:
    _lowercase = []
    _lowercase = 0
    os.makedirs(A_ , exist_ok=A_ )
    # One shard per expert rank file.
    for expert in range(A_ ):
        _lowercase = switch_checkpoint_path + F"""-rank-{expert}.pt"""
        if os.path.isfile(A_ ):
            _lowercase = torch.load(A_ )["model"]
            remove_ignore_keys_(A_ )
            _lowercase = rename_fairseq_keys(A_ , A_ )
            _lowercase = os.path.join(
                A_ , weights_name.replace(".bin" , F"""-{len(A_ )+1:05d}-of-???.bin""" ) )
            torch.save(A_ , A_ )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(A_ )[0]].dtype )

    # Add the last block
    _lowercase = os.path.join(A_ , weights_name.replace(".bin" , F"""-{len(A_ )+1:05d}-of-???.bin""" ) )
    _lowercase = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(A_ )
    _lowercase = rename_fairseq_keys(A_ , A_ )
    _lowercase = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(A_ ) == 1:
        _lowercase = os.path.join(A_ , A_ )
        torch.save(A_ , A_ )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(A_ , A_ )

    # Otherwise, let's build the index
    _lowercase = {}
    for idx, shard in enumerate(A_ ):
        _lowercase = weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(A_ ):05d}.bin""" )
        _lowercase = os.path.join(A_ , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(A_ , os.path.join(A_ , A_ ) )
        for key in shard:
            _lowercase = shard_file

    # Add the metadata
    _lowercase = {"total_size": total_size}
    _lowercase = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(A_ , A_ ) , "w" , encoding="utf-8" ) as f:
        _lowercase = json.dumps(A_ , indent=2 , sort_keys=A_ ) + "\n"
        f.write(A_ )

    return metadata, index
# Script entry point: parse CLI args, shard the fairseq checkpoint, then
# round-trip the result through NllbMoeConfig/NllbMoeModel to validate it.
# NOTE(review): results are bound to `__magic_name__`, yet the body reads
# `parser`, `args`, `config` and `model` — undefined as written; the sharder
# above is also named `A__`, not `shard_on_the_fly`. Confirm intended names.
if __name__ == "__main__":
    __magic_name__ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--nllb_moe_checkpoint_path''',
        default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
        type=str,
        required=False,
        help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
    )
    parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
        type=str,
        required=False,
        help='''Path to the output pytorch model.''',
    )
    __magic_name__ : Union[str, Any] = parser.parse_args()
    __magic_name__ , __magic_name__ : str = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    __magic_name__ : List[str] = NllbMoeConfig.from_pretrained(
        '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    __magic_name__ : Tuple = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print('''Done''')
    model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _lowercase :
    # Test helper that builds small DPT configs/inputs for the model tests.
    # NOTE(review): `__init__` declares every argument as `a` — duplicate
    # parameter names are a SyntaxError — and every stored value is rebound to
    # the local `__snake_case` instead of an instance attribute; the attribute
    # reads below (self.batch_size, ...) therefore never resolve as written.
    def __init__( self : Optional[int] , a : List[str] , a : str=2 , a : Dict=3_2 , a : Tuple=1_6 , a : str=3 , a : Union[str, Any]=True , a : Union[str, Any]=True , a : List[str]=3_2 , a : Union[str, Any]=4 , a : Optional[int]=[0, 1, 2, 3] , a : Dict=4 , a : Optional[Any]=3_7 , a : Tuple="gelu" , a : Optional[Any]=0.1 , a : List[str]=0.1 , a : Tuple=0.0_2 , a : Any=3 , a : Optional[int]=[1, 3_8_4, 2_4, 2_4] , a : int=True , a : Any=None , ):
        """simple docstring"""
        __snake_case : str =parent
        __snake_case : int =batch_size
        __snake_case : Optional[int] =image_size
        __snake_case : Optional[Any] =patch_size
        __snake_case : Optional[int] =num_channels
        __snake_case : List[str] =is_training
        __snake_case : Optional[Any] =use_labels
        __snake_case : Optional[int] =hidden_size
        __snake_case : str =num_hidden_layers
        __snake_case : Optional[Any] =backbone_out_indices
        __snake_case : Optional[int] =num_attention_heads
        __snake_case : List[Any] =intermediate_size
        __snake_case : Union[str, Any] =hidden_act
        __snake_case : List[Any] =hidden_dropout_prob
        __snake_case : Any =attention_probs_dropout_prob
        __snake_case : Any =initializer_range
        __snake_case : Union[str, Any] =num_labels
        __snake_case : int =backbone_featmap_shape
        __snake_case : Optional[int] =scope
        __snake_case : Union[str, Any] =is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        __snake_case : str =(image_size // patch_size) ** 2
        __snake_case : List[Any] =num_patches + 1

    def _UpperCamelCase ( self : Dict ):
        """Create pixel values (and labels when use_labels) plus a config."""
        __snake_case : Optional[int] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __snake_case : Optional[Any] =None
        if self.use_labels:
            __snake_case : str =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        __snake_case : List[Any] =self.get_config()

        return config, pixel_values, labels

    def _UpperCamelCase ( self : Optional[Any] ):
        """Build a small hybrid DPTConfig with a BiT-style backbone config."""
        __snake_case : Tuple ={
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [9_6, 1_9_2, 3_8_4, 7_6_8],
            '''num_groups''': 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a , backbone_featmap_shape=self.backbone_featmap_shape , )

    def _UpperCamelCase ( self : List[Any] , a : Dict , a : Optional[Any] , a : int ):
        """Forward DPTModel and check the last_hidden_state shape."""
        __snake_case : Tuple =DPTModel(config=a )
        model.to(a )
        model.eval()
        __snake_case : List[str] =model(a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCamelCase ( self : Optional[Any] , a : Optional[int] , a : List[Any] , a : List[str] ):
        """Forward DPTForDepthEstimation and check predicted_depth shape."""
        __snake_case : Dict =self.num_labels
        __snake_case : Optional[Any] =DPTForDepthEstimation(a )
        model.to(a )
        model.eval()
        __snake_case : Any =model(a )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )

    def _UpperCamelCase ( self : int , a : str , a : Union[str, Any] , a : Optional[int] ):
        """Forward DPTForSemanticSegmentation with labels, check logits shape."""
        __snake_case : str =self.num_labels
        __snake_case : Dict =DPTForSemanticSegmentation(a )
        model.to(a )
        model.eval()
        __snake_case : Optional[Any] =model(a , labels=a )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def _UpperCamelCase ( self : Tuple ):
        """Split prepare_config_and_inputs into (config, inputs_dict)."""
        __snake_case : int =self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case : List[str] =config_and_inputs
        __snake_case : Tuple ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : str = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_a : Dict = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_a : Optional[int] = False
_a : List[Any] = False
_a : int = False
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : Any =DPTModelTester(self )
__snake_case : int =ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
__snake_case , __snake_case : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] =model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : List[str] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case , __snake_case : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] =model_class(a )
__snake_case : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[Any] =[*signature.parameters.keys()]
__snake_case : Dict =['''pixel_values''']
self.assertListEqual(arg_names[:1] , a )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
__snake_case : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[Any] =True
if model_class in get_values(a ):
continue
__snake_case : Tuple =model_class(a )
model.to(a )
model.train()
__snake_case : List[str] =self._prepare_for_class(a , a , return_labels=a )
__snake_case : int =model(**a ).loss
loss.backward()
def _UpperCamelCase ( self : str ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int =False
__snake_case : str =True
if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case : List[Any] =model_class(a )
model.to(a )
model.gradient_checkpointing_enable()
model.train()
__snake_case : List[Any] =self._prepare_for_class(a , a , return_labels=a )
__snake_case : Any =model(**a ).loss
loss.backward()
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] =_config_zero_init(a )
for model_class in self.all_model_classes:
__snake_case : List[str] =model_class(config=a )
# Skip the check for the backbone
__snake_case : List[Any] =[]
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case : Any =[f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def _UpperCamelCase ( self : Dict ):
        """Common-test hook intentionally disabled via the ``unittest.skip`` decorator above."""
        # No body on purpose: the decorator prevents this test from running at all.
        pass
@slow
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case : Dict =DPTModel.from_pretrained(a )
self.assertIsNotNone(a )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case , __snake_case : int =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple ='''add'''
with self.assertRaises(a ):
__snake_case : List[Any] =DPTForDepthEstimation(a )
def prepare_img():
    """Load the COCO sample image used by the integration test.

    Renamed from the obfuscated ``__lowercase``: the integration test below
    calls it as ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    """Slow integration test running the real ``Intel/dpt-hybrid-midas`` checkpoint.

    Renamed from the obfuscated ``_lowercase`` so it no longer collides with other
    classes of the same name in this module; internal names that had collapsed to
    the undefined ``a`` are restored.
    """

    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        # Depth values are compared after the same /100 scaling the reference used.
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 497 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds tiny BertGeneration configs/inputs and runs shape checks for the test class.

    Renamed from the obfuscated ``_lowercase``: the test class below instantiates it
    as ``BertGenerationEncoderTester(self)``.  The obfuscated ``__init__`` signature
    repeated the parameter name ``a`` (a SyntaxError); the real parameter names are
    restored from the assignment order in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        # `parent` is the unittest.TestCase; assertions are delegated to it.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) for an encoder pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        # NOTE(review): token_labels is only bound when use_labels is True — this
        # mirrors the original; all callers use the default use_labels=True.
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        """Build a small non-decoder BertGenerationConfig from the tester fields."""
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Like prepare_config_and_inputs, plus decoder mode and cross-attention inputs."""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        """Forward the encoder with and without a mask; check last_hidden_state shape."""
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        """Forward the encoder in decoder mode with cross-attention; check output shape."""
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        # Second call omits the encoder attention mask on purpose (default-mask path).
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        """Check that cached past_key_values reproduce the no-cache hidden states."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        """Forward the decoder LM head with labels; check the logits shape."""
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common ModelTesterMixin expects."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for BertGeneration.

    The obfuscated version used the undefined base name ``lowerCAmelCase`` and named
    every class attribute ``_a`` (each shadowing the previous one); the real mixin
    bases are imported at the top of this module, and the attribute names below are
    the ones the mixins require.
    """

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        # Force the "bert" model type to exercise that config path.
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # Decoder path with input_mask=None exercises the default-mask branch.
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """Slow checks of the real encoder checkpoint's output shape and values.

    Renamed from the obfuscated ``_lowercase`` so it no longer shadows the other
    classes of that name in this module.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """Slow checks of the decoder LM head's logits shape and values.

    Renamed from the obfuscated ``_lowercase`` so it no longer shadows the other
    classes of that name in this module.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        # 50358 is the LM-head vocab size of this checkpoint.
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 497 | 1 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 49 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Return True if some permutation of *input_str* is a palindrome.

    Spaces are ignored and the comparison is case-insensitive.  A string can be
    rearranged into a palindrome iff at most one character has an odd count.

    Renamed from the obfuscated ``lowercase_``: both ``benchmark`` and the
    ``__main__`` block call it under this name.
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Return True if some permutation of *input_str* is a palindrome.

    Manual frequency-count version of
    ``can_string_be_rearranged_as_palindrome_counter`` (kept separate so the
    benchmark can compare the two).  Spaces are ignored; matching is
    case-insensitive.  Renamed from the obfuscated ``lowercase_``: ``benchmark``
    calls it under this name.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # A palindrome permutation allows at most one character with an odd count.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
def benchmark(input_str: str = "") -> None:
    """Print answers and ``timeit`` timings for both palindrome-rearrangement checks.

    Renamed from the obfuscated ``lowercase_``: the ``__main__`` block calls
    ``benchmark(check_str)``.  The timed statements read ``check_str`` from
    ``__main__``, so this is only meaningful when run as a script.
    """
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
if __name__ == "__main__":
a : int = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
a : Tuple = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 639 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ = 1_0
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ = [1, 2, 3, 4]
A__ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase_,self.block_size,0 ),lowercase_ )
def snake_case__ ( self : str )-> Union[str, Any]:
'''simple docstring'''
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(lowercase_,self.block_size,0 ),lowercase_ )
def snake_case__ ( self : Union[str, Any] )-> Dict:
'''simple docstring'''
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
A__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(lowercase_,self.block_size,0 ),lowercase_ )
def snake_case__ ( self : Any )-> List[str]:
'''simple docstring'''
A__ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
A__ , A__ = process_story(lowercase_ )
self.assertEqual(lowercase_,[] )
def snake_case__ ( self : Union[str, Any] )-> int:
'''simple docstring'''
A__ = ''
A__ , A__ = process_story(lowercase_ )
self.assertEqual(lowercase_,[] )
self.assertEqual(lowercase_,[] )
def snake_case__ ( self : List[Any] )-> List[Any]:
'''simple docstring'''
A__ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
A__ , A__ = process_story(lowercase_ )
A__ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(lowercase_,lowercase_ )
A__ = ['It was the best of times.']
self.assertEqual(lowercase_,lowercase_ )
def snake_case__ ( self : Optional[int] )-> List[str]:
'''simple docstring'''
A__ = torch.tensor([1, 2, 3, 4] )
A__ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase_,0 ).numpy(),expected.numpy() )
def snake_case__ ( self : Any )-> str:
'''simple docstring'''
A__ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
A__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase_,2_3 ).numpy(),expected.numpy() )
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase_,1 ).numpy(),expected.numpy() )
def snake_case__ ( self : int )-> Dict:
'''simple docstring'''
A__ = 1_0_1
A__ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
A__ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A__ = compute_token_type_ids(lowercase_,lowercase_ )
np.testing.assert_array_equal(lowercase_,lowercase_ )
| 712 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase_ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowercase_ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowercase_ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 
'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 
'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : Any )-> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string',id='token' ),id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string',id='token' ),id='sequence' ),id='references' ),
} ),)
def snake_case__ ( self : List[str],lowercase_ : List[List[List[str]]],lowercase_ : List[List[str]],lowercase_ : int = 1,lowercase_ : int = 4,)-> Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowercase_,hypotheses=lowercase_,min_len=lowercase_,max_len=lowercase_ )
}
| 586 | 0 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class lowerCAmelCase__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Dict , UpperCamelCase_ : float , UpperCamelCase_ : Callable , UpperCamelCase_ : int , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : str = None , ) -> Any:
"""simple docstring"""
super().__init__()
lowerCamelCase_ : Union[str, Any] = initial_learning_rate
lowerCamelCase_ : Union[str, Any] = warmup_steps
lowerCamelCase_ : int = power
lowerCamelCase_ : int = decay_schedule_fn
lowerCamelCase_ : Union[str, Any] = name
def __call__( self : Optional[Any] , UpperCamelCase_ : Dict ) -> int:
"""simple docstring"""
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowerCamelCase_ : Dict = tf.cast(UpperCamelCase_ , tf.floataa )
lowerCamelCase_ : int = tf.cast(self.warmup_steps , tf.floataa )
lowerCamelCase_ : Dict = global_step_float / warmup_steps_float
lowerCamelCase_ : int = self.initial_learning_rate * tf.math.pow(UpperCamelCase_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase_ , )
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 0.9 , __UpperCAmelCase = 0.999 , __UpperCAmelCase = 1E-8 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = None , ):
"""simple docstring"""
lowerCamelCase_ : int = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__UpperCAmelCase , )
if num_warmup_steps:
lowerCamelCase_ : int = WarmUp(
initial_learning_rate=__UpperCAmelCase , decay_schedule_fn=__UpperCAmelCase , warmup_steps=__UpperCAmelCase , )
if weight_decay_rate > 0.0:
lowerCamelCase_ : Tuple = AdamWeightDecay(
learning_rate=__UpperCAmelCase , weight_decay_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=__UpperCAmelCase , )
else:
lowerCamelCase_ : int = tf.keras.optimizers.Adam(
learning_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __init__( self : str , UpperCamelCase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase_ : float = 0.9 , UpperCamelCase_ : float = 0.999 , UpperCamelCase_ : float = 1e-7 , UpperCamelCase_ : bool = False , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : Optional[List[str]] = None , UpperCamelCase_ : str = "AdamWeightDecay" , **UpperCamelCase_ : str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = weight_decay_rate
lowerCamelCase_ : Optional[int] = include_in_weight_decay
lowerCamelCase_ : Any = exclude_from_weight_decay
@classmethod
def __UpperCamelCase ( cls : List[Any] , UpperCamelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = {'''WarmUp''': WarmUp}
return super(UpperCamelCase_ , cls ).from_config(UpperCamelCase_ , custom_objects=UpperCamelCase_ )
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
super(UpperCamelCase_ , self )._prepare_local(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : int = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def __UpperCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def __UpperCamelCase ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : str=None , **UpperCamelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : str = list(zip(*UpperCamelCase_ ) )
return super(UpperCamelCase_ , self ).apply_gradients(zip(UpperCamelCase_ , UpperCamelCase_ ) , name=UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCamelCase ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowerCamelCase_ : Dict = apply_state or {}
lowerCamelCase_ : Optional[Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowerCamelCase_ : List[Any] = self._fallback_apply_state(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __UpperCamelCase ( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str=None ) -> int:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : Dict = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ )
lowerCamelCase_ : Any = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase_ , self )._resource_apply_dense(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=None ) -> str:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ : Tuple = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase_ )
lowerCamelCase_ : Any = self._decay_weights_op(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase_ , self )._resource_apply_sparse(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : str = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def __UpperCamelCase ( self : List[Any] , UpperCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCamelCase_ , UpperCamelCase_ ) is not None:
return False
return True
class lowerCAmelCase__ ( _lowerCAmelCase ):
def __init__( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : str = []
lowerCamelCase_ : List[Any] = None
@property
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
if self._accum_steps is None:
lowerCamelCase_ : List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : int , UpperCamelCase_ : Optional[int] ) -> str:
"""simple docstring"""
if not self._gradients:
lowerCamelCase_ : str = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCamelCase_ ) , trainable=UpperCamelCase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCamelCase_ ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase_ )}""" )
for accum_gradient, gradient in zip(self._gradients , UpperCamelCase_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCamelCase_ )
self._accum_steps.assign_add(1 )
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCamelCase_ ) )
| 501 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCamelCase : str = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = test_results.split(''' ''' )
lowerCamelCase_ : Tuple = 0
lowerCamelCase_ : Optional[int] = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase_ : Optional[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(__UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Tuple = {}
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Any = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' , __UpperCAmelCase ):
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : Optional[Any] = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
lowerCamelCase_ : str = line
lowerCamelCase_ : List[Any] = False
return failures
class lowerCAmelCase__ :
def __init__( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = title
lowerCamelCase_ : Dict = doc_test_results['''time_spent'''].split(''',''' )[0]
lowerCamelCase_ : str = doc_test_results['''success''']
lowerCamelCase_ : int = doc_test_results['''failures''']
lowerCamelCase_ : List[Any] = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase_ : Union[str, Any] = doc_test_results
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : Dict = [self._time_spent]
lowerCamelCase_ : Optional[int] = 0
for time in time_spent:
lowerCamelCase_ : Tuple = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(UpperCamelCase_ ) == 1:
lowerCamelCase_ : int = [0, 0, time_parts[0]]
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Dict = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return F"""{int(UpperCamelCase_ )}h{int(UpperCamelCase_ )}m{int(UpperCamelCase_ )}s"""
@property
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : Any = 40
lowerCamelCase_ : Tuple = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(UpperCamelCase_ , UpperCamelCase_ )}
lowerCamelCase_ : int = ''''''
for category, failures in category_failures.items():
if len(UpperCamelCase_ ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(UpperCamelCase_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(UpperCamelCase_ )
@staticmethod
def __UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Any = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(UpperCamelCase_ )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=UpperCamelCase_ , )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
lowerCamelCase_ : Optional[int] = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.'''
lowerCamelCase_ : Union[str, Any] = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=UpperCamelCase_ , )
def __UpperCamelCase ( self : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : str = ''''''
for key, value in failures.items():
lowerCamelCase_ : List[Any] = value[:200] + ''' [Truncated]''' if len(UpperCamelCase_ ) > 250 else value
failures_text += F"""*{key}*\n_{value}_\n\n"""
lowerCamelCase_ : List[str] = job_name
lowerCamelCase_ : List[str] = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
lowerCamelCase_ : List[str] = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
lowerCamelCase_ : Dict = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
lowerCamelCase_ : List[Any] = sorted(self.doc_test_results.items() , key=lambda UpperCamelCase_ : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
lowerCamelCase_ : str = F"""*Num failures* :{len(job_result['failed'] )} \n"""
lowerCamelCase_ : Any = job_result['''failures''']
lowerCamelCase_ : Any = self.get_reply_blocks(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text=UpperCamelCase_ )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F"""Results for {job}""" , blocks=UpperCamelCase_ , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def __snake_case ():
"""simple docstring"""
lowerCamelCase_ : Any = os.environ['''GITHUB_RUN_ID''']
lowerCamelCase_ : Dict = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
lowerCamelCase_ : Tuple = requests.get(__UpperCAmelCase ).json()
lowerCamelCase_ : Union[str, Any] = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
lowerCamelCase_ : Union[str, Any] = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(__UpperCAmelCase ):
lowerCamelCase_ : Optional[Any] = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , __UpperCAmelCase )
return {}
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : List[Any] = {}
if os.path.exists(__UpperCAmelCase ):
lowerCamelCase_ : Optional[Any] = os.listdir(__UpperCAmelCase )
for file in files:
try:
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , encoding='''utf-8''' ) as f:
lowerCamelCase_ : str = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(__UpperCAmelCase , __UpperCAmelCase )}.""" ) from e
return _artifact
def __snake_case ():
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self : Any , UpperCamelCase_ : str ) -> int:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = name
lowerCamelCase_ : str = []
def __str__( self : int ) -> Tuple:
"""simple docstring"""
return self.name
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : str ) -> List[str]:
"""simple docstring"""
self.paths.append({'''name''': self.name, '''path''': path} )
lowerCamelCase_ : Dict[str, Artifact] = {}
lowerCamelCase_ : List[str] = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase_ : List[Any] = directory
if artifact_name not in _available_artifacts:
lowerCamelCase_ : List[Any] = Artifact(__UpperCAmelCase )
_available_artifacts[artifact_name].add_path(__UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
__lowerCamelCase : List[Any] = get_job_links()
__lowerCamelCase : str = retrieve_available_artifacts()
__lowerCamelCase : Tuple = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCamelCase : Optional[Any] = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCamelCase : str = github_actions_job_links.get("""run_doctests""")
__lowerCamelCase : Optional[int] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowerCamelCase : Tuple = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = handle_test_results(artifact["""stats"""])
__lowerCamelCase : Union[str, Any] = failed
__lowerCamelCase : str = success
__lowerCamelCase : int = time_spent[1:-1] + """, """
__lowerCamelCase : List[Any] = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowerCamelCase : List[Any] = line.replace("""FAILED """, """""")
__lowerCamelCase : List[str] = line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = line.split("""::""")
else:
__lowerCamelCase , __lowerCamelCase : str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCamelCase : Tuple = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCamelCase : List[Any] = all_failures[test] if test in all_failures else """N/A"""
__lowerCamelCase : Optional[int] = failure
break
__lowerCamelCase : Dict = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 501 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A_ ( A_ ):
'''simple docstring'''
def __init__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = None , snake_case = None , **snake_case , ):
super().__init__(
snake_case , split=snake_case , features=snake_case , cache_dir=snake_case , keep_in_memory=snake_case , streaming=snake_case , num_proc=snake_case , **snake_case , )
lowercase = field
lowercase = path_or_paths if isinstance(snake_case , snake_case ) else {self.split: path_or_paths}
lowercase = Json(
cache_dir=snake_case , data_files=snake_case , features=snake_case , field=snake_case , **snake_case , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Build iterable dataset
if self.streaming:
lowercase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase = None
lowercase = None
lowercase = None
lowercase = None
self.builder.download_and_prepare(
download_config=snake_case , download_mode=snake_case , verification_mode=snake_case , base_path=snake_case , num_proc=self.num_proc , )
lowercase = self.builder.as_dataset(
split=self.split , verification_mode=snake_case , in_memory=self.keep_in_memory )
return dataset
class A_ :
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case = None , snake_case = None , **snake_case , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
lowercase = dataset
lowercase = path_or_buf
lowercase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase = num_proc
lowercase = """utf-8"""
lowercase = to_json_kwargs
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.to_json_kwargs.pop('path_or_buf' , snake_case )
lowercase = self.to_json_kwargs.pop('orient' , 'records' )
lowercase = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
lowercase = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
lowercase = self.to_json_kwargs.pop('compression' , snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=snake_case ) as buffer:
lowercase = self._write(file_obj=snake_case , orient=snake_case , lines=snake_case , index=snake_case , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
' was passed. Please provide a local path instead.' )
lowercase = self._write(
file_obj=self.path_or_buf , orient=snake_case , lines=snake_case , index=snake_case , **self.to_json_kwargs )
return written
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = args
lowercase = query_table(
table=self.dataset.data , key=slice(snake_case , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase = batch.to_pandas().to_json(
path_or_buf=snake_case , orient=snake_case , lines=snake_case , index=snake_case , **snake_case )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , **snake_case , ):
lowercase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
lowercase = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(snake_case )
else:
lowercase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , snake_case , snake_case )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
written += file_obj.write(snake_case )
return written
| 708 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
UpperCAmelCase = True
except ImportError:
UpperCAmelCase = False
UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class A_ ( __lowerCamelCase ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE__ ( snake_case ):
lowercase = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' , type=snake_case , help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' , type=snake_case , help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=snake_case )
def __init__( self , snake_case , snake_case , snake_case=None , *snake_case ):
lowercase = testing
lowercase = testing_file
lowercase = path
def SCREAMING_SNAKE_CASE__ ( self ):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowercase = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(snake_case ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
lowercase = (
Path(snake_case ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowercase = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(snake_case ) )
else:
with open(self._testing_file , 'r' ) as configuration_file:
lowercase = json.load(snake_case )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=snake_case , extra_context=snake_case , )
lowercase = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r' ) as configuration_file:
lowercase = json.load(snake_case )
lowercase = configuration['lowercase_modelname']
lowercase = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'''{directory}/configuration.json''' )
lowercase = 'PyTorch' in generate_tensorflow_pytorch_and_flax
lowercase = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
lowercase = 'Flax' in generate_tensorflow_pytorch_and_flax
lowercase = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(snake_case , exist_ok=snake_case )
os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=snake_case )
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , 'w' ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(snake_case ):
with open(snake_case , 'r' ) as f:
lowercase = f.readlines()
with open(snake_case , 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(snake_case )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(snake_case , snake_case , snake_case ):
# Create temp file
lowercase , lowercase = mkstemp()
lowercase = False
with fdopen(snake_case , 'w' ) as new_file:
with open(snake_case ) as old_file:
for line in old_file:
new_file.write(snake_case )
if line_to_copy_below in line:
lowercase = True
for line_to_copy in lines_to_copy:
new_file.write(snake_case )
if not line_found:
raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(snake_case , snake_case )
# Remove original file
remove(snake_case )
# Move new file
move(snake_case , snake_case )
def skip_units(snake_case ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(snake_case ):
with open(snake_case ) as datafile:
lowercase = []
lowercase = False
lowercase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowercase = line.split('"' )[1]
lowercase = skip_units(snake_case )
elif "# Below: " in line and "##" not in line:
lowercase = line.split('"' )[1]
lowercase = skip_units(snake_case )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(snake_case , snake_case , snake_case )
lowercase = []
elif "# Replace with" in line and "##" not in line:
lowercase = []
elif "##" not in line:
lines_to_copy.append(snake_case )
remove(snake_case )
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(snake_case )
| 565 | 0 |
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    """Recursively print all combinations of size ``r`` from ``arr[0..n-1]``.

    Args:
        arr: Input sequence to draw elements from.
        n: Number of usable elements in ``arr``.
        r: Size of each combination.
        index: Next position to fill in ``data``.
        data: Scratch buffer of length ``r`` holding the combination built so far.
        i: Index of the next candidate element in ``arr``.
    """
    # A full combination of r elements has been built: print it.
    if index == r:
        for j in range(r):
            print(data[j], end=' ')
        print(' ')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    """Print every combination of size ``r`` drawn from ``arr[0..n-1]``.

    Args:
        arr: Input sequence.
        n: Number of usable elements in ``arr``.
        r: Combination size.
    """
    # Temporary buffer reused by the recursion to hold one combination at a time.
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    # Print all combinations of 3 elements drawn from `arr`.
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
# Module-level logger; the verification helpers below call `logger.info(...)`.
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    """Verification level applied when downloading/preparing data.

    The string values are what users pass in configuration
    (e.g. ``verification_mode='no_checks'`` in the error message below):
      - ALL_CHECKS: run every available verification.
      - BASIC_CHECKS: run only the basic verifications.
      - NO_CHECKS: skip verification entirely.
    """

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Base error for failures while verifying checksums of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """A recorded download was not present in the expected checksum set."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some expected files are missing from the recorded downloads."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """A downloaded file's checksum differs from the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    """Compare recorded download checksums against the expected ones.

    expected_checksums: mapping url -> expected metadata, or None to skip
    recorded_checksums: mapping url -> recorded metadata
    verification_name:  optional label included in the error message

    Raises ExpectedMoreDownloadedFiles / UnexpectedDownloadedFile when the
    key sets differ, NonMatchingChecksumError when any per-url value differs.
    NOTE(review): the obfuscated original repeated one parameter name three
    times (a SyntaxError); names restored from the body's references.
    """
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    # Files we expected but never recorded
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    # Files we recorded but never expected
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f'Checksums didn\'t match{for_verification_name}:\n'
            f'{bad_urls}\n'
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Base error for split-size verification failures."""


class UnexpectedSplits(SplitsVerificationException):
    """The dataset produced splits that were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits were not produced."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """A produced split's size does not match the expected size."""
def verify_splits(expected_splits, recorded_splits):
    """Compare recorded split sizes against the expected ones.

    expected_splits: mapping name -> split info (with .num_examples), or None
    recorded_splits: mapping name -> recorded split info

    Raises ExpectedMoreSplits / UnexpectedSplits when the name sets differ,
    NonMatchingSplitsSizesError when any num_examples differs.
    NOTE(review): duplicate parameter names in the obfuscated original
    (a SyntaxError) restored from the body's references.
    """
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    # Splits we expected but never produced
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    # Splits produced that were never expected
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """Return ``{"num_bytes": ..., "checksum": ...}`` for the file at ``path``.

    The checksum is the hex SHA-256 digest of the file contents, computed by
    streaming 1 MiB chunks; it is None when ``record_checksum`` is False.
    """
    # NOTE(review): the top-of-file `from hashlib import shaaaa` is a broken
    # (obfuscated) import; import the real sha256 locally instead.
    from hashlib import sha256

    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """Return True iff ``dataset_size`` fits under config.IN_MEMORY_MAX_SIZE.

    Returns False when ``dataset_size`` is falsy (None/0) or when no
    in-memory size limit is configured.
    """
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 547 | 0 |
"""simple docstring"""
from __future__ import annotations
def merge(input_list, low, mid, high):
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high+1].

    The merged result is written back into input_list[low:high+1] in place
    and the (whole) list is returned.
    NOTE(review): the obfuscated original collapsed the tuple unpacking and
    the slice assignment into bare ``A__ = ...`` statements; restored here.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # pop from whichever run currently has the smaller head (<= keeps it stable)
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list):
    """Sort ``input_list`` with bottom-up (iterative) merge sort.

    Works on a copy of the input; returns the sorted list.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    # Read a comma-separated list of integers; an empty line sorts [].
    # (original assigned input to an obfuscated name while testing `user_input`)
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 721 | """simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
    """Nightly inpainting test for the legacy ONNX Stable Diffusion pipeline.

    NOTE(review): all three methods were obfuscated to one shadowed name while
    the test body reads ``self.gpu_provider`` / ``self.gpu_options``; property
    names restored from those references, and the test method renamed with a
    ``test_`` prefix so unittest discovers it (original name lost).
    """

    @property
    def gpu_provider(self):
        # onnxruntime CUDA execution provider capped at a 15GB arena
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A red cat sitting on a park bench'

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.7_5,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type='np',
        )
        image = output.images[0]

        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 536 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# GLUE task name -> number of classification labels; referenced by the
# conversion function when building a sequence-classification head.
# (original assigned this to a throwaway name while the code below reads
# GLUE_TASKS_NUM_LABELS)
GLUE_TASKS_NUM_LABELS = {
    'cola': 2,
    'mnli': 3,
    'mrpc': 2,
    'sst-2': 2,
    'sts-b': 1,
    'qqp': 2,
    'qnli': 2,
    'rte': 2,
    'wnli': 2,
}
logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model directory.

    tf_checkpoint_path:       path to the TF checkpoint
    bert_config_file:         json config file describing the XLNet architecture
    pytorch_dump_folder_path: output directory for weights + config
    finetuning_task:          optional task name selecting the model head

    NOTE(review): function and parameter names (and the WEIGHTS_NAME /
    CONFIG_NAME join arguments, imported at the top of the file but unused
    in the obfuscated body) restored to match the __main__ call below.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # original assigned the parser/args to obfuscated names while the calls
    # below use `parser` and `args`; restored so the script runs
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--xlnet_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained XLNet model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--finetuning_task',
        default=None,
        type=str,
        help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 73 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): in the obfuscated original the map below was assigned to the
# same throwaway name as the logger above, clobbering it; restored to
# distinct, conventional names.
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'edbeeching/decision-transformer-gym-hopper-medium': (
        'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _snake_case(PretrainedConfig):
    """Configuration class for a Decision Transformer (GPT-2 style backbone).

    NOTE(review): the obfuscated original inherited an undefined name (the
    intended base, PretrainedConfig, is imported at the top of the file),
    repeated a single parameter name for the whole __init__ signature (a
    SyntaxError), and assigned the three class attributes to one shadowed
    name. All names restored; defaults preserved in their original order.
    """

    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        state_dim=17,                 # dimension of the environment state
        act_dim=4,                    # dimension of the action space
        hidden_size=128,
        max_ep_len=4096,              # maximum episode length
        action_tanh=True,             # apply tanh to predicted actions
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ) -> None:
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 73 | 1 |
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
SCREAMING_SNAKE_CASE_ = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def A__ ( A__ ) -> List[Any]:
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    # original assigned every intermediate to one obfuscated name while the
    # following lines read `parser`, `args`, `transformers_module`,
    # `transformers_path`; restored so the check actually runs
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        # check the installed package itself
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        # check the freshly built release tree
        transformers_path = Path.cwd() / '''build/lib/transformers'''
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 579 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# The three constants below were all bound to one shadowed throwaway name in
# the obfuscated original, while the tokenizer class references the
# conventional names; restored. Contents preserved byte-for-byte.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
        '''roberta-base-openai-detector''': (
            '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
        ),
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''roberta-base''': 512,
    '''roberta-large''': 512,
    '''roberta-large-mnli''': 512,
    '''distilroberta-base''': 512,
    '''roberta-base-openai-detector''': 512,
    '''roberta-large-openai-detector''': 512,
}
class a(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) byte-level BPE tokenizer in the RoBERTa style.

    NOTE(review): the obfuscated original inherited an undefined base (the
    intended PreTrainedTokenizerFast is imported at the top of the file),
    repeated one parameter name through __init__ (a SyntaxError), used
    ``@mask_token.setter`` on a property that was not named ``mask_token``
    (a NameError at class creation), and shadowed every method under one
    name. Names restored to the conventional fast-tokenizer overrides.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> str:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Keep the backend pre-tokenizer's add_prefix_space in sync with the argument.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Mask token string; logs an error and returns None when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # NOTE(review): lstrip/rstrip flags were lost in obfuscation; restored
        # to the conventional mask-token behavior (eats the preceding space).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        # <s> ids </s> [</s> ids2 </s>]
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output

        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None) -> List[int]:
        # RoBERTa does not use token type ids: everything is segment 0
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
| 579 | 1 |
'''simple docstring'''
import enum
import shutil
import sys
# Terminal width in columns (first element of get_terminal_size); used below
# to pad/overwrite whole lines. The original unpacked both elements into the
# same throwaway name while the helpers read TERMINAL_WIDTH.
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

# Arrow direction -> ANSI CSI final byte (ESC[<n><char> moves the cursor).
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class Direction(enum.Enum):
    """Vertical cursor-movement direction for the interactive menu.

    NOTE(review): the obfuscated original gave both members one name, which
    Enum rejects as a duplicate; names restored, values 0/1 preserved.
    """

    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    """Write ``content`` to stdout immediately (explicit flush, no newline)."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write ``content`` wrapped in an ANSI SGR color escape sequence."""
    forceWrite(f"""\u001b[{color}m{content}\u001b[0m""", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite('\r')


def move_cursor(num_lines, direction):
    """Move the cursor ``num_lines`` steps in ``direction`` (key of CURSOR_TO_CHAR)."""
    forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""")


def clear_line():
    """Blank out the current line and return the cursor to its start."""
    forceWrite(' ' * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the full terminal width."""
    reset_cursor()
    forceWrite('-' * TERMINAL_WIDTH)
| 342 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    """Sum every parameter value in ``state_dict`` (as a float tensor scalar).

    Entries whose key contains ``encoder.embeddings`` contribute 0.
    """
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename original-FLAVA checkpoint keys to the HF FlavaForPreTraining layout.

    state_dict:          original FLAVA weights
    codebook_state_dict: converted DALL-E codebook weights

    NOTE(review): the obfuscated original lost every assignment target
    (`snake_case = ...`); the key-rename chain and the ``image_codebook.``
    prefix for codebook weights are restored per the upstream conversion
    script.
    """
    upgrade = {}

    for key, value in state_dict.items():
        # embeddings are duplicated in the original checkpoint; drop them
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace('''heads.cmd.mim_head.cls.predictions''', '''mmm_image_head''')
        key = key.replace('''heads.cmd.mlm_head.cls.predictions''', '''mmm_text_head''')
        key = key.replace('''heads.cmd.itm_head.cls''', '''itm_head''')
        key = key.replace('''heads.cmd.itm_head.pooler''', '''itm_head.pooler''')
        key = key.replace('''heads.cmd.clip_head.logit_scale''', '''flava.logit_scale''')
        key = key.replace('''heads.fairseq_mlm.cls.predictions''', '''mlm_head''')
        key = key.replace('''heads.imagenet.mim_head.cls.predictions''', '''mim_head''')
        key = key.replace('''mm_text_projection''', '''flava.text_to_mm_projection''')
        key = key.replace('''mm_image_projection''', '''flava.image_to_mm_projection''')
        key = key.replace('''image_encoder.module''', '''flava.image_model''')
        key = key.replace('''text_encoder.module''', '''flava.text_model''')
        key = key.replace('''mm_encoder.module.encoder.cls_token''', '''flava.multimodal_model.cls_token''')
        key = key.replace('''mm_encoder.module''', '''flava.multimodal_model''')
        key = key.replace('''text_projection''', '''flava.text_projection''')
        key = key.replace('''image_projection''', '''flava.image_projection''')

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Convert an original FLAVA checkpoint (+ DALL-E codebook) to HF format.

    checkpoint_path:          local path or URL of the FLAVA checkpoint
    codebook_path:            path to the DALL-E codebook checkpoint
    pytorch_dump_folder_path: output directory for the converted model
    config_path:              optional path to an hf config.json

    NOTE(review): local-variable names were lost in obfuscation; restored so
    each value actually feeds the next step.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location='''cpu''')
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location='''cpu''')

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    # sanity check: the conversion must preserve the total parameter mass
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # original assigned parser/args to obfuscated names while the calls below
    # read `parser` and `args`; restored so the script runs
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 342 | 1 |
from PIL import Image
def mean_threshold(image):
    """Binarize a grayscale PIL image in place around its mean intensity.

    Pixels strictly above the mean become 255, the rest 0; the (mutated)
    image object is returned.
    NOTE(review): the obfuscated original lost the ``height, width`` unpack,
    the ``pixel = pixels[j, i]`` read and the ``pixels[i, j] = ...`` write;
    restored from the names used in the body. PIL's ``image.size`` is
    (width, height), so the names below are swapped — every pixel is still
    visited because both loops use both bounds.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    # First pass: accumulate the mean intensity over all pixels.
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    # Second pass: threshold every pixel against the mean.
    for j in range(height):
        for i in range(width):
            pixels[i, j] = 2_55 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Binarize the input image around its mean and save the result.
    # (original bound the result to an obfuscated name while calling image.save)
    image = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
| 712 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Map an LDM VAE state dict onto diffusers' AutoencoderKL key layout.

    checkpoint: original LDM VAE state dict
    config:     diffusers VAE config (forwarded to assign_to_checkpoint)

    Returns the renamed state dict.
    NOTE(review): the obfuscated original dropped every ``new_checkpoint[...]``
    assignment target; target keys restored per diffusers'
    convert_vae_pt_to_diffusers script.
    """
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]

        if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.weight')
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.bias')

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'down.{i}.block', "new": F'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'mid.block_{i}', "new": F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
        ]

        if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.bias'
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'up.{block_id}.block', "new": F'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": F'mid.block_{i}', "new": F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    """Convert a VAE .pt / .safetensors checkpoint to a diffusers AutoencoderKL.

    checkpoint_path: path to the source VAE weights
    output_path:     directory to save the converted diffusers model

    NOTE(review): duplicate parameter names and lost locals from the
    obfuscation restored; function name matches the __main__ call below.
    """
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 5_12
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    # original assigned parser/args to obfuscated names while the calls below
    # read `parser` and `args`; restored so the script runs
    parser = argparse.ArgumentParser()

    parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 207 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
    """Image processor implementing a resize -> center-crop -> rescale -> normalize pipeline.

    NOTE(review): this block is machine-obfuscated. Every ``def`` below declares
    several parameters with the identical name ``lowercase__`` (duplicate argument
    names are a SyntaxError in Python), and the method bodies read names such as
    ``size`` / ``do_resize`` that the signatures no longer bind. The original
    parameter names must be restored before this class can be imported or run.
    """
    # Only key emitted by preprocess() in the returned BatchFeature.
    SCREAMING_SNAKE_CASE : Optional[Any] = ['pixel_values']
    # Stores the default resize/crop/rescale/normalize configuration.
    def __init__( self : Tuple ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : PILImageResampling = PIL.Image.BICUBIC ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,**lowercase__ : Optional[Any] ,):
        super().__init__(**lowercase__ )
        # Default resize target (256x256) and crop size (224x224) when none supplied.
        __lowercase = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
        __lowercase = get_size_dict(lowercase__ )
        __lowercase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        __lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''' )
        __lowercase = do_resize
        __lowercase = size
        __lowercase = resample
        __lowercase = do_center_crop
        __lowercase = crop_size
        __lowercase = do_rescale
        __lowercase = rescale_factor
        __lowercase = do_normalize
        # Fall back to ImageNet statistics when no mean/std is given.
        __lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
    # Resize one image to size["height"] x size["width"].
    def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : PILImageResampling = PIL.Image.BICUBIC ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ,):
        __lowercase = get_size_dict(lowercase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return resize(
            lowercase__ ,size=(size['''height'''], size['''width''']) ,resample=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
    # Center-crop one image to size["height"] x size["width"].
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : np.ndarray ,lowercase__ : Dict[str, int] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Union[str, Any] ,):
        __lowercase = get_size_dict(lowercase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(lowercase__ ,size=(size['''height'''], size['''width''']) ,data_format=lowercase__ ,**lowercase__ )
    # Multiply pixel values by a scale factor (e.g. 1/255).
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : np.ndarray ,lowercase__ : Union[int, float] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : Any ,):
        return rescale(lowercase__ ,scale=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
    # Normalize with a per-channel mean and standard deviation.
    def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : np.ndarray ,lowercase__ : Union[float, List[float]] ,lowercase__ : Union[float, List[float]] ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ,):
        return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,data_format=lowercase__ ,**lowercase__ )
    # Full pipeline: validate inputs, then apply each enabled step to every image.
    def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : ImageInput ,lowercase__ : bool = None ,lowercase__ : Dict[str, int] = None ,lowercase__ : str=None ,lowercase__ : bool = None ,lowercase__ : Dict[str, int] = None ,lowercase__ : bool = None ,lowercase__ : float = None ,lowercase__ : bool = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[float, List[float]]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : Union[str, Any] ,):
        # Per-call arguments override the instance-level defaults set in __init__.
        __lowercase = do_resize if do_resize is not None else self.do_resize
        __lowercase = resample if resample is not None else self.resample
        __lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
        __lowercase = do_rescale if do_rescale is not None else self.do_rescale
        __lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowercase = do_normalize if do_normalize is not None else self.do_normalize
        __lowercase = image_mean if image_mean is not None else self.image_mean
        __lowercase = image_std if image_std is not None else self.image_std
        __lowercase = size if size is not None else self.size
        __lowercase = get_size_dict(lowercase__ )
        __lowercase = crop_size if crop_size is not None else self.crop_size
        __lowercase = get_size_dict(lowercase__ ,param_name='''crop_size''' )
        __lowercase = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): operator-precedence bug — this parses as
        # ``(do_resize and size is None) or (resample is None)``; it should be
        # ``do_resize and (size is None or resample is None)``.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        __lowercase = [to_numpy_array(lowercase__ ) for image in images]
        if do_resize:
            __lowercase = [self.resize(image=lowercase__ ,size=lowercase__ ,resample=lowercase__ ) for image in images]
        if do_center_crop:
            __lowercase = [self.center_crop(image=lowercase__ ,size=lowercase__ ) for image in images]
        if do_rescale:
            __lowercase = [self.rescale(image=lowercase__ ,scale=lowercase__ ) for image in images]
        if do_normalize:
            __lowercase = [self.normalize(image=lowercase__ ,mean=lowercase__ ,std=lowercase__ ) for image in images]
        # Convert to the requested channel layout (channels-first by default).
        __lowercase = [to_channel_dimension_format(lowercase__ ,lowercase__ ) for image in images]
        __lowercase = {'''pixel_values''': images}
        return BatchFeature(data=lowercase__ ,tensor_type=lowercase__ )
| 41 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Helper that holds a DetrImageProcessor configuration and computes expected output sizes.

    NOTE(review): obfuscated block — every parameter below is named
    ``lowerCamelCase`` (duplicate argument names are a SyntaxError) and the
    bodies read the pre-obfuscation names (``parent``, ``batch_size``, ...)
    that are no longer bound by the signatures.
    """
    def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=4_00 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=1 / 2_55 , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        snake_case__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = num_channels
        snake_case__ = min_resolution
        snake_case__ = max_resolution
        snake_case__ = do_resize
        snake_case__ = size
        snake_case__ = do_rescale
        snake_case__ = rescale_factor
        snake_case__ = do_normalize
        snake_case__ = image_mean
        snake_case__ = image_std
        snake_case__ = do_pad
    # Returns the kwargs dict used to construct the image processor under test.
    def A_ ( self ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }
    # Computes the (height, width) the processor is expected to produce for the
    # given inputs, mimicking shortest-edge resizing; with batched=True it
    # returns the max over the batch (padding target).
    def A_ ( self , lowerCamelCase , lowerCamelCase=False ):
        if not batched:
            snake_case__ = image_inputs[0]
            if isinstance(lowerCamelCase , Image.Image ):
                snake_case__ , snake_case__ = image.size
            else:
                snake_case__ , snake_case__ = image.shape[1], image.shape[2]
            # Scale so the shorter side equals size["shortest_edge"].
            if w < h:
                snake_case__ = int(self.size["shortest_edge"] * h / w )
                snake_case__ = self.size["shortest_edge"]
            elif w > h:
                snake_case__ = self.size["shortest_edge"]
                snake_case__ = int(self.size["shortest_edge"] * w / h )
            else:
                snake_case__ = self.size["shortest_edge"]
                snake_case__ = self.size["shortest_edge"]
        else:
            snake_case__ = []
            for image in image_inputs:
                snake_case__ , snake_case__ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            snake_case__ = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
            snake_case__ = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
    """Unit tests for DetrImageProcessor (properties, PIL/numpy/torch inputs, COCO annotations).

    NOTE(review): obfuscated block — every test method below is named ``A_``,
    so each definition overwrites the previous one and only the last survives;
    the original test names must be restored. ``DetrImageProcessingTester`` is
    also not defined in this file (the tester class above was renamed).
    """
    _A : Optional[Any] = DetrImageProcessor if is_vision_available() else None
    # setUp: build the shared tester fixture.
    def A_ ( self ):
        snake_case__ = DetrImageProcessingTester(self )
    @property
    def A_ ( self ):
        # Convenience accessor for the constructor kwargs of the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    # Verifies the processor exposes all expected configuration attributes.
    def A_ ( self ):
        snake_case__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
        self.assertTrue(hasattr(lowerCamelCase , "rescale_factor" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(lowerCamelCase , "size" ) )
        self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
    # Verifies from_dict construction, including legacy size/max_size kwargs.
    def A_ ( self ):
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , lowerCamelCase )
        snake_case__ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , lowerCamelCase )
    def A_ ( self ):
        # Intentionally empty placeholder test.
        pass
    # Checks output shapes for PIL image inputs (single image and batch).
    def A_ ( self ):
        # Initialize image_processing
        snake_case__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , Image.Image )
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # Checks output shapes for numpy array inputs (single image and batch).
    def A_ ( self ):
        # Initialize image_processing
        snake_case__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , np.ndarray )
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # Checks output shapes for torch tensor inputs (single image and batch).
    def A_ ( self ):
        # Initialize image_processing
        snake_case__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCamelCase , torch.Tensor )
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # Integration test: object-detection annotations against reference values
    # from the pretrained facebook/detr-resnet-50 checkpoint.
    @slow
    def A_ ( self ):
        # prepare image and target
        snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case__ = json.loads(f.read() )
        snake_case__ = {"image_id": 3_97_69, "annotations": target}
        # encode them
        snake_case__ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
        snake_case__ = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
        # verify pixel values
        snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
        snake_case__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
        # verify area
        snake_case__ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
        # verify boxes
        snake_case__ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
        snake_case__ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
        # verify image_id
        snake_case__ = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
        # verify is_crowd
        snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
        # verify class_labels
        snake_case__ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
        # verify orig_size
        snake_case__ = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
        # verify size
        snake_case__ = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
    # Integration test: panoptic-segmentation annotations (adds mask checks)
    # against the facebook/detr-resnet-50-panoptic checkpoint.
    @slow
    def A_ ( self ):
        # prepare image, target and masks_path
        snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case__ = json.loads(f.read() )
        snake_case__ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        snake_case__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case__ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
        snake_case__ = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
        # verify pixel values
        snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
        snake_case__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
        # verify area
        snake_case__ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
        # verify boxes
        snake_case__ = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
        snake_case__ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
        # verify image_id
        snake_case__ = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
        # verify is_crowd
        snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
        # verify class_labels
        snake_case__ = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
        # verify masks
        snake_case__ = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
        # verify orig_size
        snake_case__ = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
        # verify size
        snake_case__ = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 276 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): obfuscation renamed five distinct module constants to the same
# name ``SCREAMING_SNAKE_CASE__`` — at runtime each assignment clobbers the
# previous one, and the tokenizer class below references the original names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES), which are now undefined.
# Module logger.
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# SentencePiece word-boundary marker character (presumably SPIECE_UNDERLINE).
SCREAMING_SNAKE_CASE__ = '''▁'''
# Vocabulary file name(s) (presumably VOCAB_FILES_NAMES — used by save_vocabulary below).
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
# Download URLs for the pretrained vocab (presumably PRETRAINED_VOCAB_FILES_MAP).
SCREAMING_SNAKE_CASE__ = {
    '''vocab_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
        ),
    }
}
# Max model input sizes (presumably PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
SCREAMING_SNAKE_CASE__ = {
    '''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
SCREAMING_SNAKE_CASE__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', 
'''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __lowerCAmelCase ( UpperCAmelCase_ ):
    """SentencePiece-backed NLLB tokenizer with fairseq-aligned ids and language-code tokens.

    NOTE(review): this block is machine-obfuscated. Every method declares
    several parameters with the identical name ``_snake_case`` (duplicate
    argument names are a SyntaxError in Python), and most assignment targets
    were collapsed to ``A__`` even where the bodies read the original names
    (``size``, ``mask_token``, ...). The original names must be restored
    before this class can be imported or run.
    """
    A__ : Optional[Any] = VOCAB_FILES_NAMES
    A__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    A__ : Optional[Any] = ["input_ids", "attention_mask"]
    # Prefix/suffix special-token id lists, managed by set_src/tgt_lang_special_tokens.
    A__ : List[int] = []
    A__ : List[int] = []
    def __init__( self : Tuple , _snake_case : str , _snake_case : List[str]="<s>" , _snake_case : int="</s>" , _snake_case : Optional[Any]="</s>" , _snake_case : Union[str, Any]="<s>" , _snake_case : Any="<unk>" , _snake_case : str="<pad>" , _snake_case : Optional[Any]="<mask>" , _snake_case : Tuple=None , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : Optional[Dict[str, Any]] = None , _snake_case : List[str]=None , _snake_case : Optional[int]=False , **_snake_case : List[str] , ):
        """Load the SentencePiece model and build the fairseq-aligned vocabulary tables."""
        # Mask token behaves like a normal word, i.e. includes the space before it.
        A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
        A__ = {} if sp_model_kwargs is None else sp_model_kwargs
        A__ = legacy_behaviour
        super().__init__(
            bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , tokenizer_file=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_snake_case , **_snake_case , )
        A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(_snake_case ) )
        A__ = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'   | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 token
        A__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        A__ = 1
        A__ = len(self.sp_model )
        # Language codes sit after the SP vocabulary (plus the fairseq offset).
        A__ = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_snake_case )
        }
        A__ = {v: k for k, v in self.lang_code_to_id.items()}
        A__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        A__ = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        A__ = src_lang if src_lang is not None else 'eng_Latn'
        A__ = self.lang_code_to_id[self._src_lang]
        A__ = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self : Optional[Any] ):
        """Drop the unpicklable SentencePiece processor; keep its serialized proto."""
        A__ = self.__dict__.copy()
        A__ = None
        A__ = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : List[Any] , _snake_case : Optional[Any] ):
        """Restore state and rebuild the SentencePiece processor from the serialized proto."""
        A__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            A__ = {}
        A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def _a ( self : Tuple ):
        """Total vocabulary size: SP pieces + language codes + fairseq offset + mask token."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def _a ( self : str ):
        """Currently active source-language code (e.g. ``eng_Latn``)."""
        return self._src_lang
    @src_lang.setter
    def _a ( self : Any , _snake_case : str ):
        """Set the source language and refresh the special-token prefix/suffix lists."""
        A__ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _a ( self : Optional[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ):
        """Return a 0/1 mask marking special tokens in the (pair of) sequence(s)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case )
        A__ = [1] * len(self.prefix_tokens )
        A__ = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_snake_case )) + suffix_ones
        return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones
    def _a ( self : str , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
        """Wrap the token ids with the model's prefix/suffix special tokens."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def _a ( self : str , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
        """Return an all-zero token-type-id list (NLLB does not use token types)."""
        A__ = [self.sep_token_id]
        A__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _a ( self : List[Any] , _snake_case : Any , _snake_case : str , _snake_case : Optional[str] , _snake_case : Optional[str] , **_snake_case : Optional[Any] ):
        """Tokenize raw inputs for translation and attach the target-language forced BOS id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        A__ = src_lang
        A__ = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case )
        A__ = self.convert_tokens_to_ids(_snake_case )
        A__ = tgt_lang_id
        return inputs
    def _a ( self : Optional[int] ):
        """Return the full token -> id mapping, including added tokens."""
        A__ = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _a ( self : Any , _snake_case : str ):
        """Split text into SentencePiece pieces (out_type argument obfuscated; originally ``str``)."""
        return self.sp_model.encode(_snake_case , out_type=_snake_case )
    def _a ( self : str , _snake_case : List[str] ):
        """Map a token string to its id, honoring the fairseq offset and special tokens."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        A__ = self.sp_model.PieceToId(_snake_case )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _a ( self : Optional[Any] , _snake_case : List[Any] ):
        """Map an id back to its token string (inverse of _convert_token_to_id)."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def _a ( self : Optional[int] , _snake_case : Dict ):
        """Join pieces into text (replace target obfuscated; originally the SP word marker)."""
        A__ = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
        return out_string
    def _a ( self : Optional[Any] , _snake_case : str , _snake_case : Optional[str] = None ):
        """Write the SentencePiece model file into *save_directory*; returns the path tuple."""
        if not os.path.isdir(_snake_case ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ = os.path.join(
            _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _snake_case )
        elif not os.path.isfile(self.vocab_file ):
            with open(_snake_case , 'wb' ) as fi:
                A__ = self.sp_model.serialized_model_proto()
                fi.write(_snake_case )
        return (out_vocab_file,)
    def _a ( self : Tuple , _snake_case : List[str] , _snake_case : str = "eng_Latn" , _snake_case : Optional[List[str]] = None , _snake_case : str = "fra_Latn" , **_snake_case : Dict , ):
        """Set src/tgt languages, then delegate batch preparation to the base class."""
        A__ = src_lang
        A__ = tgt_lang
        return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case )
    def _a ( self : int ):
        """Switch special tokens to source-language mode (used by the base class)."""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _a ( self : List[Any] ):
        """Switch special tokens to target-language mode (used by the base class)."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def _a ( self : List[str] , _snake_case : Tuple ):
        """Reset prefix/suffix token ids for *src_lang* (legacy mode: no prefix, [eos, lang] suffix)."""
        A__ = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            A__ = []
            A__ = [self.eos_token_id, self.cur_lang_code]
        else:
            A__ = [self.cur_lang_code]
            A__ = [self.eos_token_id]
    def _a ( self : List[Any] , _snake_case : str ):
        """Reset prefix/suffix token ids for the target language (mirrors the src variant)."""
        A__ = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            A__ = []
            A__ = [self.eos_token_id, self.cur_lang_code]
        else:
            A__ = [self.cur_lang_code]
            A__ = [self.eos_token_id]
| 52 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests

# Fixed: BeautifulSoup lives in the ``bs4`` package ("from bsa import ..."
# was a typo and raised ModuleNotFoundError).
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Build the query from CLI args, or prompt interactively (URL-quoted).
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    # Randomized User-Agent reduces the chance of Google serving a captcha page.
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # Desktop result layout: first organic result sits in a div.yuRUbf.
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout: div.kCrYT wraps a redirect URL whose real target
        # is in the ``url`` query-string parameter.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    # Open the first result in the default browser.
    webbrowser.open(link)
| 52 | 1 |
from __future__ import annotations
import queue
class TreeNode:
    """A binary-tree node holding a payload plus ``left``/``right`` child links.

    Restored from the obfuscated original, whose ``__init__`` assigned a local
    from the undefined name ``data`` and never set any instance attributes.
    The rest of this module constructs nodes as ``TreeNode(int(...))``.
    """

    def __init__(self, data) -> None:
        # Payload value; this module stores ints parsed from user input.
        self.data = data
        self.left = None
        self.right = None


# Backward-compatible alias for the previous (obfuscated) class name.
UpperCAmelCase_ = TreeNode
def build_tree() -> TreeNode:
    """Interactively build a binary tree level by level from user input.

    Prompts for the root value, then for each dequeued node asks for its left
    and right children; entering ``n`` (or nothing) at any prompt stops input
    and returns the root. Restored name/locals: the obfuscated version bound
    every local to ``_a`` while reading ``check``/``tree_node``/... and was
    never callable as ``build_tree`` from the ``__main__`` block.
    """
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    # Unreachable (the loop always returns); kept to satisfy type checkers.
    raise
def pre_order(node: TreeNode) -> None:
    """Print a pre-order traversal (root, left, right) of *node*, comma-separated.

    Silently returns on non-TreeNode input or an empty subtree. Restored from
    the obfuscated ``__UpperCAmelCase`` definition whose body already recursed
    via ``pre_order`` and validated with a broken ``isinstance(__a, __a)``.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Print an in-order traversal (left, root, right) of *node*, comma-separated.

    Silently returns on non-TreeNode input or an empty subtree.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Print a post-order traversal (left, right, root) of *node*, comma-separated.

    Silently returns on non-TreeNode input or an empty subtree.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Print a breadth-first (level-order) traversal of *node*, comma-separated.

    Uses a FIFO queue; silently returns on non-TreeNode input or an empty tree.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Level-order traversal printing one tree level per output line.

    Drains the queue one full level at a time, collecting the next level in
    ``list_`` before re-enqueueing it; prints a newline between levels.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal (root, left, right) using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of inner while means the current node has no left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal (left, root, right) using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    # Restored: the obfuscated source collapsed both stacks into one name.
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stackb
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stackb will be the post order
        print(stackb.pop().data, end=",")
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return ``s`` centered in a banner of ``width`` characters built from ``char``.

    An empty ``s`` yields a newline followed by a full separator line.
    Restored: the obfuscated signature repeated ``__a`` three times (a
    SyntaxError); the body already used ``s``/``width``/``char``.
    """
    if not s:
        return "\n" + width * char
    # Split the leftover width around the label; `extra` absorbs odd widths.
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
    # Demo driver: build a sample tree and print every traversal variant.
    import doctest

    doctest.testmod()
    print(prompt('''Binary Tree Traversals'''))
    # Fixed: the tree root was bound to `a__` but every call below reads `node`.
    node = build_tree()
    print(prompt('''Pre Order Traversal'''))
    pre_order(node)
    print(prompt() + '''\n''')
    print(prompt('''In Order Traversal'''))
    in_order(node)
    print(prompt() + '''\n''')
    print(prompt('''Post Order Traversal'''))
    post_order(node)
    print(prompt() + '''\n''')
    print(prompt('''Level Order Traversal'''))
    level_order(node)
    print(prompt() + '''\n''')
    print(prompt('''Actual Level Order Traversal'''))
    level_order_actual(node)
    print('''*''' * 50 + '''\n''')
    print(prompt('''Pre Order Traversal - Iteration Version'''))
    pre_order_iter(node)
    print(prompt() + '''\n''')
    print(prompt('''In Order Traversal - Iteration Version'''))
    in_order_iter(node)
    print(prompt() + '''\n''')
    print(prompt('''Post Order Traversal - Iteration Version'''))
    post_order_iter(node)
    print(prompt())
| 14 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
# Restored: all four module constants were collapsed onto a single name by
# obfuscation; the last line (and the __main__ block) reference these names.
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])  # apex: (1/2, sqrt(3)/2)
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply ``iteration_step`` ``steps`` times, starting from ``initial_vectors``.

    Restored: the obfuscated signature repeated ``__a`` (SyntaxError) and the
    loop never fed the previous result back into the next step.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Perform one Koch-snowflake step on a polyline.

    Each segment is replaced by four: the middle third is removed and replaced
    by the two sides of an outward equilateral triangle (60-degree rotation).
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D ``vector`` counterclockwise by ``angle_in_degrees``.

    Restored: the obfuscated signature repeated ``__a`` (SyntaxError) and the
    rotation matrix referenced undefined ``c``/``s``.
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Render the polyline described by ``vectors`` with matplotlib.

    Fixed: the obfuscated version threw away the unzipped coordinates and
    passed the raw vector list to ``plt.plot`` twice.
    """
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixed: the result was bound to `a__` but `plot` reads `processed_vectors`.
    # Five iterations give a visually smooth snowflake.
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 14 | 1 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__(UpperCAmelCase_, UpperCAmelCase_):
    """T5-style decoder stack conditioned on a diffusion timestep via FiLM.

    NOTE(review): this module looks machine-obfuscated — the ``a__``
    parameters are never read and the bodies refer to ``_lowercase`` /
    ``d_model``, which are undefined here, so the class cannot run as
    written. Comments below describe the apparent intent only; confirm
    against the upstream implementation before trusting them.
    """

    @register_to_config
    def __init__(self: int, a__: int = 128, a__: int = 256, a__: float = 2000.0, a__: int = 768, a__: int = 12, a__: int = 12, a__: int = 64, a__: int = 2048, a__: float = 0.1, ):
        super().__init__()
        # MLP lifting the timestep embedding to the FiLM conditioning width.
        UpperCAmelCase = nn.Sequential(
            nn.Linear(_lowercase, d_model * 4, bias=_lowercase), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=_lowercase), nn.SiLU(), )
        UpperCAmelCase = nn.Embedding(_lowercase, _lowercase)
        UpperCAmelCase = False
        UpperCAmelCase = nn.Linear(_lowercase, _lowercase, bias=_lowercase)
        UpperCAmelCase = nn.Dropout(p=_lowercase)
        UpperCAmelCase = nn.ModuleList()
        for lyr_num in range(_lowercase):
            # FiLM conditional T5 decoder
            UpperCAmelCase = DecoderLayer(d_model=_lowercase, d_kv=_lowercase, num_heads=_lowercase, d_ff=_lowercase, dropout_rate=_lowercase)
            self.decoders.append(_lowercase)
        UpperCAmelCase = TaLayerNorm(_lowercase)
        UpperCAmelCase = nn.Dropout(p=_lowercase)
        UpperCAmelCase = nn.Linear(_lowercase, _lowercase, bias=_lowercase)

    def __snake_case(self: List[Any], a__: Union[str, Any], a__: Any):
        # Builds a broadcastable encoder-decoder attention mask from a query
        # mask and a key mask (outer product, then add a heads axis).
        UpperCAmelCase = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def __snake_case(self: Dict, a__: List[str], a__: List[str], a__: Any):
        # NOTE(review): this second ``__snake_case`` shadows the previous one
        # in the class namespace — upstream these were two differently named
        # methods (the mask helper vs. the forward pass).
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        UpperCAmelCase = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
        UpperCAmelCase = self.conditioning_emb(_lowercase).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        UpperCAmelCase = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        UpperCAmelCase = torch.broadcast_to(
            torch.arange(_lowercase, device=decoder_input_tokens.device), (batch, seq_length), )
        UpperCAmelCase = self.position_encoding(_lowercase)
        UpperCAmelCase = self.continuous_inputs_projection(_lowercase)
        inputs += position_encodings
        UpperCAmelCase = self.dropout(_lowercase)
        # decoder: No padding present.
        UpperCAmelCase = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        UpperCAmelCase = [(x, self.encoder_decoder_mask(_lowercase, _lowercase)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        UpperCAmelCase = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        UpperCAmelCase = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            UpperCAmelCase = lyr(
                _lowercase, conditioning_emb=_lowercase, encoder_hidden_states=_lowercase, encoder_attention_mask=_lowercase, )[0]
        UpperCAmelCase = self.decoder_norm(_lowercase)
        UpperCAmelCase = self.post_dropout(_lowercase)
        UpperCAmelCase = self.spec_out(_lowercase)
        return spec_out
class lowerCAmelCase__(nn.Module):
    """One decoder block: FiLM-conditioned self-attention, cross-attention,
    and a FiLM-conditioned feed-forward, collected in ``self.layer``.

    NOTE(review): obfuscated — ``_lowercase`` placeholders are undefined and
    the ModuleList is never assigned to ``self.layer``; confirm upstream.
    """

    def __init__(self: Optional[Any], a__: Any, a__: Any, a__: Dict, a__: Optional[int], a__: Dict, a__: Optional[Any]=1e-6):
        super().__init__()
        UpperCAmelCase = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=_lowercase, d_kv=_lowercase, num_heads=_lowercase, dropout_rate=_lowercase))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=_lowercase, d_kv=_lowercase, num_heads=_lowercase, dropout_rate=_lowercase, layer_norm_epsilon=_lowercase, ))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=_lowercase, d_ff=_lowercase, dropout_rate=_lowercase, layer_norm_epsilon=_lowercase))

    def __snake_case(self: Union[str, Any], a__: List[Any], a__: List[Any]=None, a__: Dict=None, a__: Optional[int]=None, a__: List[str]=None, a__: Optional[int]=None, ):
        # FiLM-conditioned self-attention sublayer.
        UpperCAmelCase = self.layer[0](
            _lowercase, conditioning_emb=_lowercase, attention_mask=_lowercase, )
        if encoder_hidden_states is not None:
            # Convert the 0/1 encoder mask into an additive -1e10 bias.
            UpperCAmelCase = torch.where(encoder_attention_mask > 0, 0, -1e1_0).to(
                encoder_hidden_states.dtype)
            UpperCAmelCase = self.layer[1](
                _lowercase, key_value_states=_lowercase, attention_mask=_lowercase, )
        # Apply Film Conditional Feed Forward layer
        UpperCAmelCase = self.layer[-1](_lowercase, _lowercase)
        return (hidden_states,)
class lowerCAmelCase__(nn.Module):
    """Self-attention sublayer whose pre-norm is optionally FiLM-conditioned.

    NOTE(review): obfuscated — ``_lowercase`` placeholders are undefined and
    submodules are never assigned to ``self``; confirm upstream.
    """

    def __init__(self: int, a__: Optional[Any], a__: Optional[Any], a__: Tuple, a__: str):
        super().__init__()
        UpperCAmelCase = TaLayerNorm(_lowercase)
        UpperCAmelCase = TaFiLMLayer(in_features=d_model * 4, out_features=_lowercase)
        UpperCAmelCase = Attention(query_dim=_lowercase, heads=_lowercase, dim_head=_lowercase, out_bias=_lowercase, scale_qk=_lowercase)
        UpperCAmelCase = nn.Dropout(_lowercase)

    def __snake_case(self: Any, a__: Union[str, Any], a__: List[str]=None, a__: str=None, ):
        # pre_self_attention_layer_norm
        UpperCAmelCase = self.layer_norm(_lowercase)
        if conditioning_emb is not None:
            UpperCAmelCase = self.FiLMLayer(_lowercase, _lowercase)
        # Self-attention block
        UpperCAmelCase = self.attention(_lowercase)
        # Residual connection around the attention output.
        UpperCAmelCase = hidden_states + self.dropout(_lowercase)
        return hidden_states
class lowerCAmelCase__(nn.Module):
    """Cross-attention sublayer (queries attend over encoder key/value states).

    NOTE(review): obfuscated — ``_lowercase`` placeholders are undefined and
    submodules are never assigned to ``self``; confirm upstream.
    """

    def __init__(self: Optional[int], a__: Any, a__: Any, a__: Optional[Any], a__: List[str], a__: int):
        super().__init__()
        UpperCAmelCase = Attention(query_dim=_lowercase, heads=_lowercase, dim_head=_lowercase, out_bias=_lowercase, scale_qk=_lowercase)
        UpperCAmelCase = TaLayerNorm(_lowercase, eps=_lowercase)
        UpperCAmelCase = nn.Dropout(_lowercase)

    def __snake_case(self: Any, a__: Any, a__: Any=None, a__: str=None, ):
        UpperCAmelCase = self.layer_norm(_lowercase)
        UpperCAmelCase = self.attention(
            _lowercase, encoder_hidden_states=_lowercase, attention_mask=attention_mask.squeeze(1), )
        # Residual connection around the attention output.
        UpperCAmelCase = hidden_states + self.dropout(_lowercase)
        return layer_output
class lowerCAmelCase__(nn.Module):
    """Feed-forward sublayer with FiLM conditioning applied after the norm.

    NOTE(review): obfuscated — ``_lowercase`` placeholders are undefined and
    submodules are never assigned to ``self``; confirm upstream.
    """

    def __init__(self: Tuple, a__: Dict, a__: str, a__: Dict, a__: Any):
        super().__init__()
        UpperCAmelCase = TaDenseGatedActDense(d_model=_lowercase, d_ff=_lowercase, dropout_rate=_lowercase)
        UpperCAmelCase = TaFiLMLayer(in_features=d_model * 4, out_features=_lowercase)
        UpperCAmelCase = TaLayerNorm(_lowercase, eps=_lowercase)
        UpperCAmelCase = nn.Dropout(_lowercase)

    def __snake_case(self: Optional[Any], a__: List[Any], a__: Any=None):
        UpperCAmelCase = self.layer_norm(_lowercase)
        if conditioning_emb is not None:
            UpperCAmelCase = self.film(_lowercase, _lowercase)
        UpperCAmelCase = self.DenseReluDense(_lowercase)
        # Residual connection around the feed-forward output.
        UpperCAmelCase = hidden_states + self.dropout(_lowercase)
        return hidden_states
class lowerCAmelCase__(nn.Module):
    """T5 gated-activation feed-forward: two input projections (gate via GELU,
    plus a linear branch) multiplied together, then projected back by ``wo``.

    NOTE(review): obfuscated — the three Linear layers are assigned to a bare
    name instead of attributes, and ``self.wi_a`` is reused for both the gate
    and the linear branch; confirm against the upstream layer.
    """

    def __init__(self: Any, a__: List[Any], a__: Optional[int], a__: Union[str, Any]):
        super().__init__()
        UpperCAmelCase = nn.Linear(_lowercase, _lowercase, bias=_lowercase)
        UpperCAmelCase = nn.Linear(_lowercase, _lowercase, bias=_lowercase)
        UpperCAmelCase = nn.Linear(_lowercase, _lowercase, bias=_lowercase)
        UpperCAmelCase = nn.Dropout(_lowercase)
        UpperCAmelCase = NewGELUActivation()

    def __snake_case(self: Dict, a__: Any):
        UpperCAmelCase = self.act(self.wi_a(_lowercase))
        UpperCAmelCase = self.wi_a(_lowercase)
        UpperCAmelCase = hidden_gelu * hidden_linear
        UpperCAmelCase = self.dropout(_lowercase)
        UpperCAmelCase = self.wo(_lowercase)
        return hidden_states
class lowerCAmelCase__(nn.Module):
    """T5-style RMS layer norm: scale-only, no mean subtraction, no bias.

    See "Root Mean Square Layer Normalization" (https://arxiv.org/abs/1910.07467).
    Restored from obfuscation: attributes were never assigned to ``self`` and
    the dtypes referenced non-existent ``torch.floataa``/``bfloataa``.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learnable per-channel scale; there is deliberately no bias term.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Accumulate the variance in fp32 even for half-precision inputs.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # Convert back into half precision if the weights are half precision.
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class lowerCAmelCase__(nn.Module):
    """Tanh-approximated GELU as used by Google BERT / T5.

    See "Gaussian Error Linear Units" (https://arxiv.org/abs/1606.08415).
    Restored from obfuscation: the body read ``input``/``_lowercase`` while
    the parameter was named ``a__``.
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044_715 * torch.pow(input, 3.0))))
class lowerCAmelCase__(nn.Module):
    """FiLM layer: predicts a per-feature (scale, shift) from a conditioning
    embedding and applies ``x * (1 + scale) + shift``.

    See "FiLM: Visual Reasoning with a General Conditioning Layer"
    (https://arxiv.org/abs/1709.07871). Restored from obfuscation: the
    signatures repeated parameter names (SyntaxError) and the projection was
    never assigned to ``self.scale_bias``, which the body reads.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        # One projection emits scale and shift stacked on the last axis.
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 703 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure (standard transformers pattern): nothing heavy is
# imported until an attribute of this module is actually accessed.
# Restored from obfuscation: every assignment here had been collapsed onto
# ``a__`` even though the last line reads ``_import_structure``, and the
# _LazyModule instance was never installed into sys.modules.
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

# The modeling module needs torch; register it only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 570 | 0 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Rail-fence (zigzag) encrypt ``input_string`` over ``key`` rails.

    Restored from obfuscation: the signature repeated its parameter name
    (SyntaxError) and the body mixed the placeholder with ``key``.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds of one zigzag period
        num = min(num, lowest * 2 - num)  # reflect to create the zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Invert the rail-fence encryption of ``input_string`` for ``key`` rails.

    Restored from obfuscation: the signature repeated its parameter name
    (SyntaxError) and most locals were collapsed onto placeholders.

    Raises:
        ValueError: if ``key`` is zero or negative.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Try every key from 1 to ``len(input_string) - 1``; return {key: plaintext}.

    Fixed: the obfuscated version computed each decryption but never stored it
    in the results dictionary.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 75 |
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= ``n``.

    Primes up to sqrt(n) are found first, then used to sieve fixed-size
    segments, keeping memory proportional to sqrt(n). Restored from
    obfuscation: the parameter had been substituted for the loop variable,
    the sieve step, and the segment index, which produced wrong results.
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # primes up to sqrt(n) sieve later segments
    temp = [True] * (end + 1)
    in_prime = []

    # Plain sieve over [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve each segment [low, high] with the base primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each  # largest multiple of `each` <= low
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(1_0**6))
| 75 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Configure Hugging Face logging once at import time: INFO verbosity with the
# default handler and explicit record formatting.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV files into tokenized ``tf.data`` datasets.

    Restored from obfuscation: all six parameters shared one name (a
    SyntaxError); the names above come from the keyword call site in main().

    Returns:
        ``(train_ds, val_ds, test_ds, label2id)`` — datasets for splits that
        were not provided are ``None``.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    # One remaining text column -> single sequence; two -> sentence pair.
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # NOTE(review): the integer dtype was obfuscated (`tf.intaa`);
    # reconstructed as int64 for both features and labels — confirm against
    # the upstream example before relying on it.
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
lowercase_ = logging.getLogger(__name__)
@dataclass
class __a:
    """Data arguments for the TF text-classification example script.

    NOTE(review): this dataclass is machine-obfuscated — every field shares
    the name ``SCREAMING_SNAKE_CASE`` (each clobbers the previous) and the
    default ``snake_case__`` is undefined in this file. The `help` strings
    identify the intended fields; confirm against the upstream script.
    """

    # Intended fields (from the help strings): label_column_id, train_file,
    # dev_file, test_file, max_seq_length, overwrite_cache.
    SCREAMING_SNAKE_CASE = field(metadata={"help": "Which column contains the label"})
    SCREAMING_SNAKE_CASE = field(default=snake_case__, metadata={"help": "The path of the training file"})
    SCREAMING_SNAKE_CASE = field(default=snake_case__, metadata={"help": "The path of the development file"})
    SCREAMING_SNAKE_CASE = field(default=snake_case__, metadata={"help": "The path of the test file"})
    SCREAMING_SNAKE_CASE = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    SCREAMING_SNAKE_CASE = field(
        default=snake_case__, metadata={"help": "Overwrite the cached training and evaluation sets"})
@dataclass
class __a:
    """Model/tokenizer selection arguments for the TF example script.

    NOTE(review): obfuscated like the dataclass above — fields share one name
    and ``snake_case__`` is undefined. Intended fields (from the help
    strings): model_name_or_path, config_name, tokenizer_name, use_fast,
    cache_dir. This definition also reuses the name ``__a`` and therefore
    shadows the previous dataclass.
    """

    SCREAMING_SNAKE_CASE = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    SCREAMING_SNAKE_CASE = field(
        default=snake_case__, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    SCREAMING_SNAKE_CASE = field(
        default=snake_case__, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    SCREAMING_SNAKE_CASE = field(default=snake_case__, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    SCREAMING_SNAKE_CASE = field(
        default=snake_case__, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
def main():
    """CLI entry point: fine-tune / evaluate a TF sequence-classification model.

    NOTE(review): reconstructed from an obfuscated source; the symbol ``main``
    and the local names follow the call sites visible in this file.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Accuracy from argmax over the class logits.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
    # CLI entry point.
    main()
| 702 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape amazon.in search results for ``product`` into a DataFrame.

    Columns: title, link, current price, rating, MRP and discount percentage.
    Requires network access; page-layout changes can break the selectors.
    NOTE(review): reconstructed from an obfuscated source — the function name
    comes from the call at the bottom of the file; the ``h2`` selector and the
    post-loop clean-up lines are reconstructed intent, not verbatim original.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # NOTE(review): the two placeholder assignments in the obfuscated source
    # stored " "; blanking empty price/MRP cells is the reconstructed intent.
    data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
    data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Example run: scrape search results for "headphones" and save them as CSV.
    # Fixed: the product name was bound to an obfuscated name while the call
    # below reads `product`.
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 456 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level constants below were obfuscated to the same
# name, so the URL map clobbers the logger. Upstream these are the module
# logger and the pretrained-config archive map.
lowercase_ = logging.get_logger(__name__)

# Publicly hosted CamemBERT-style checkpoints and their config file URLs.
lowercase_ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __a(_UpperCAmelCase):
    """Configuration class for CamemBERT (RoBERTa-architecture) models.

    Defaults reproduce the `camembert-base` checkpoint. Restored from
    obfuscation: every __init__ parameter shared one name (a SyntaxError);
    the canonical CamemBERT hyperparameter names match the stored attributes.
    """

    # Model-type key used by the Auto* mapping machinery.
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __a(_UpperCAmelCase):
    """ONNX export configuration exposing the model's dynamic input axes."""

    @property
    def lowerCamelCase_(self):
        """Map each model input name to its dynamic-axis labels."""
        # Multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            axes = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            axes = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict([('''input_ids''', axes), ('''attention_mask''', axes)])
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level constants below were obfuscated to `a`, so
# the URL map clobbers the logger. Upstream these are the module logger and
# the pretrained-config archive map.
a : List[Any] = logging.get_logger(__name__)

# Publicly hosted CamemBERT-style checkpoints and their config file URLs.
a : str = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class a_ ( PretrainedConfig ):
    """CamemBERT model configuration; defaults reproduce ``camembert-base``.

    NOTE(review): the mangled source gave every ``__init__`` parameter the
    same name (a duplicate-argument SyntaxError), subclassed the undefined
    name ``_UpperCAmelCase``, and bound the model-type string to the
    attribute ``a``. Parameter names are reconstructed from the attribute
    assignments; the base class is the ``PretrainedConfig`` imported at the
    top of the file, whose auto-class machinery requires ``model_type``.
    """

    # Identifier used by the AutoConfig/AutoModel registries.
    model_type = 'camembert'

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the encoder hyper-parameters; special-token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a_ ( _UpperCAmelCase ):
    """ONNX export configuration exposing the dynamic axes of the model inputs."""

    @property
    def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for each exported input tensor."""
        # A multiple-choice head adds a "choice" axis between batch and sequence.
        axes = (
            {0: "batch", 1: "choice", 2: "sequence"}
            if self.task == "multiple-choice"
            else {0: "batch", 1: "sequence"}
        )
        return OrderedDict([("input_ids", axes), ("attention_mask", axes)])
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _A ( args: Namespace ):
    """Build the convert command from a parsed CLI :class:`~argparse.Namespace`.

    Registered via ``set_defaults(func=...)`` on the ``convert`` sub-parser,
    so argparse calls it with the parsed arguments.

    NOTE(review): the body read ``args`` while the parameter was mangled to
    ``lowerCAmelCase_`` (a NameError at call time) — the parameter is renamed
    to match the body. ``ConvertCommand`` must resolve to the command class
    defined below.
    """
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
# Error message raised (as ImportError) when a TensorFlow-based conversion is
# requested but TensorFlow is not installed.
UpperCamelCase = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand ( BaseTransformersCLICommand ):
    """CLI command that converts original author/TF checkpoints into
    Transformers PyTorch checkpoints.

    NOTE(review): reconstructed from mangled source — the class is renamed to
    ``ConvertCommand`` to match the reference in the factory ``_A`` above, the
    two methods (which both carried the name ``a``, the second shadowing the
    first) are restored to the ``register_subcommand``/``run`` contract of
    ``BaseTransformersCLICommand``, and ``__init__`` parameters (which all
    shared one name, a SyntaxError) are recovered from the attribute
    assignments.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the ``convert`` sub-parser and its arguments to ``parser``."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        # Route the parsed namespace to the factory defined above.
        train_parser.set_defaults(func=_A)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        """Dispatch to the per-architecture conversion function.

        TF-based converters are imported lazily; a missing TensorFlow install
        surfaces as ``ImportError(UpperCamelCase)`` (the module-level message).
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(UpperCamelCase)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            # Converter is PyTorch-only, no TF guard needed.
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase)
            # A ".ckpt" path is a TF checkpoint; anything else is a dataset file.
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase)
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(UpperCamelCase)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 717 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of the VQ encoder: the (not yet quantized) latent feature map.

    NOTE(review): ``encode`` below constructs ``VQEncoderOutput(latents=...)``,
    so this class must carry that name and expose a ``latents`` field — the
    mangled original (``snake_case__ = 42`` on an undefined base) had neither.
    The base is the ``BaseOutput`` imported at the top of the file.
    """

    # Latent feature map produced by encoder + quant conv.
    latents: torch.FloatTensor
class __lowerCamelCase(ModelMixin, ConfigMixin):
    """VQ-VAE model: encoder -> vector quantizer -> decoder.

    NOTE(review): reconstructed from mangled source — ``__init__`` previously
    bound every sub-module to one throwaway local instead of the ``self``
    attributes that ``encode``/``decode`` read, used the non-existent
    ``nn.Convad`` (restored to ``nn.Conv2d``), and all three methods shared
    the name ``a`` (mutual shadowing). Method names are restored from the
    ``self.encode``/``self.decode`` calls in ``forward``; the mixin bases are
    the ``ModelMixin``/``ConfigMixin`` imported at the top of the file.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder (double_z=False: a VQ latent has no mean/logvar split)
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        # Embedding width defaults to the latent channel count.
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode ``x`` into (un-quantized) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize the latents (unless ``force_not_quantize``) and decode them."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # "spatial" norm decoders take the pre-conv quantized latents as a second input.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full autoencoding pass: encode, quantize, decode ``sample``."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 125 | 0 |
"""Notebook-doc configuration for the Korean ("ko") documentation build."""
# Cell prepended to every generated notebook (text is in Korean): installs
# transformers/datasets; the commented line switches to a source install.
a__ : Union[str, Any] ='''
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# First cells injected into every converted notebook.
# NOTE(review): references ``INSTALL_CONTENT`` although the snippet above was
# bound to the mangled name ``a__`` — confirm the intended constant name.
a__ : Dict =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder substitutions applied when formatting doc examples.
a__ : Union[str, Any] ={
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 399 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger for the conversion script.
a__ : Optional[int] =logging.get_logger(__name__)

# Fix the RNG so the old-vs-new model comparison below is reproducible.
set_seed(770)

# NOTE(review): every constant below is bound to the same mangled name ``a__``
# (each rebinding shadows the previous one), while the functions below read
# ``new_layer_name_dict`` / ``REMOTE_MODEL_PATHS`` / ``CACHE_DIR`` /
# ``default_cache_dir`` — confirm the intended names.
# Original Bark layer-name fragments -> HF layer names (applied to checkpoint keys).
a__ : str ={
    '''c_attn''': '''att_proj''',
    '''c_proj''': '''out_proj''',
    '''c_fc''': '''in_proj''',
    '''transformer.''': '''''',
    '''h.''': '''layers.''',
    '''ln_1''': '''layernorm_1''',
    '''ln_2''': '''layernorm_2''',
    '''ln_f''': '''layernorm_final''',
    '''wpe''': '''position_embeds_layer''',
    '''wte''': '''input_embeds_layer''',
}
# Hub locations of the six Suno Bark checkpoints (small/large x text/coarse/fine).
a__ : str ={
    '''text_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text.pt''',
    },
    '''coarse_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse.pt''',
    },
    '''fine_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine.pt''',
    },
    '''text''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text_2.pt''',
    },
    '''coarse''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse_2.pt''',
    },
    '''fine''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine_2.pt''',
    },
}
# Local cache layout: ~/.cache (or $XDG_CACHE_HOME) / suno / bark_v0.
a__ : Dict =os.path.dirname(os.path.abspath(__file__))
a__ : str =os.path.join(os.path.expanduser('''~'''), '''.cache''')
a__ : str =os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the checkpoint for ``model_type``.

    NOTE(review): in the mangled source both parameters shared the name
    ``__lowercase`` (a duplicate-argument SyntaxError); the function is
    renamed from ``lowercase__`` to match the call in ``load_model``. The
    first ``os.path.join`` argument was mangled too — ``CACHE_DIR`` matches
    the "downloading into CACHE_DIR" behaviour in ``_load_model``; confirm.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'])
def _download(from_hf_path, file_name):
    """Download ``file_name`` from the Hub repo ``from_hf_path`` into the cache dir.

    NOTE(review): both parameters previously shared one name (SyntaxError) and
    every call-site value was mangled to that name; reconstruction assumes the
    cache directory target and ``exist_ok=True`` — confirm against the
    upstream conversion script.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load one Bark sub-model checkpoint into the matching HF model class.

    Downloads the checkpoint if missing, renames the original layer keys to
    the HF layout, validates the key sets, and returns the model in eval mode
    on ``device``.

    NOTE(review): reconstructed from mangled source in which all distinct
    locals had been collapsed into one name and the parameters shared one
    name (SyntaxError); renamed from ``lowercase__`` to match the call in
    ``load_model``. ``strict=False`` on load and ``exclude_embeddings=True``
    follow the upstream script — confirm.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info['repo_id'], model_info['file_name'])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: rewrite the stored args into the HF config vocabulary
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head')
    model_args['hidden_size'] = model_args.pop('n_embd')
    model_args['num_layers'] = model_args.pop('n_layer')
    model_config = ConfigClass(**checkpoint['model_args'])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint: strip the torch.compile prefix and remap layer names
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    # The causal-mask buffers (".attn.bias") are intentionally not shipped.
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias')}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias')}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, validate it against the original Bark
    implementation, and save it to ``pytorch_dump_folder_path``.

    NOTE(review): reconstructed from mangled source (collapsed locals);
    renamed from ``lowercase__`` to match the ``__main__`` call below.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, 'cpu', model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters')
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape')
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('initial and new outputs are not equal')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    """Assemble the three converted sub-models plus the Encodec codec into a
    single BarkModel and save (optionally push) it.

    NOTE(review): all six parameters previously shared one name (a
    SyntaxError), so the argument wiring is reconstructed; confirm the
    parameter order and the ``push_to_hub=True`` default against the upstream
    conversion script before running.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, 'config.json'))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, 'config.json'))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, 'config.json'))
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz')
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz')
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # NOTE(review): the mangled source bound the parser and parsed args to the
    # throwaway name ``a__`` while the following lines read ``parser``/``args``
    # (NameError); names restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 399 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds a tiny Lilt configuration plus random inputs and runs
    shape-level checks for the Lilt model classes.

    NOTE(review): reconstructed from mangled source — the class is renamed to
    ``LiltModelTester`` (the name the test class below instantiates), all
    methods previously shared one name (mutual shadowing), and the
    ``create_and_check_*`` parameters (which all carried the name ``a``, a
    SyntaxError) are recovered from ``prepare_config_and_inputs``'s return
    order.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels)``."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: each box needs x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported input combinations.
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the Lilt models.

    NOTE(review): reconstructed from mangled source — the mixin bases and the
    mixin-contract attribute names (``all_model_classes`` etc.) were mangled
    away so the inherited suites could not find them, ``setUp`` did not assign
    ``self.model_tester``/``self.config_tester``, and every test method shared
    one name. The class is renamed so it no longer shadows the tester class.
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip every pipeline test for Lilt (presumably the extra bbox input
        # is unsupported there — confirm).
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """Slow integration test running the pretrained Lilt base model on a tiny input.

    NOTE(review): the mangled source named this class identically to two other
    classes in this module (shadowing them) and referenced the undefined name
    ``A_``; the class is renamed and ``A_`` resolved to the concrete
    tensors / ``torch_device``.
    """

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 700 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE(ProcessorMixin):
    """Wraps a CLAP feature extractor and a Roberta tokenizer into one processor.

    NOTE(review): reconstructed from mangled source — the ``ProcessorMixin``
    contract attributes (``feature_extractor_class``/``tokenizer_class``) and
    the ``batch_decode``/``decode``/``model_input_names`` method names were
    mangled away, ``__init__``/``__call__`` parameters shared duplicate names
    (a SyntaxError), and the combined text+audio branch dropped the audio
    features instead of storing them into the text encoding.
    """

    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or extract features from ``audios``.

        Returns the text encoding (with ``input_features`` merged in when
        audio is also given) or a :class:`BatchEncoding` of audio features.
        """
        sampling_rate = kwargs.pop('sampling_rate', None)
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )
        if text is not None and audios is not None:
            # Merge the audio features into the text encoding.
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and feature-extractor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 45 | 0 |
"""Lazy-import module definition for the M-CTC-T model."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# NOTE(review): reconstructed from mangled source — the structure dict was
# bound to ``lowerCamelCase_`` while ``_LazyModule`` below read
# ``_import_structure``, the torch branch overwrote the dict instead of
# adding the modeling entries, and the lazy module was never installed into
# ``sys.modules``; this is the standard transformers lazy-init pattern.
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is present.
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 330 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

# NOTE(review): the original bound every constant above to one obfuscated name,
# so each assignment overwrote the previous; names restored to the identifiers
# the docstring decorators further down actually reference.
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm -> activation, the basic RegNet building block."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # NOTE(review): the original assigned these modules to a local instead of
        # `self.*` and used the nonexistent names `nn.Convad`/`nn.BatchNormad`;
        # restored to the attributes the forward pass reads.
        # "same" padding for odd kernel sizes; bias is redundant before BatchNorm.
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # `ACTaFN` is the activation lookup imported at the top of this file;
        # `None` yields a pass-through layer.
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        """Apply conv, norm, then activation to `hidden_state`."""
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet stem: a single stride-2 convolution over the input pixels."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        # Remembered so forward() can validate the channel dimension of inputs.
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        """Embed `pixel_values`; raises if its channel count disagrees with the config."""
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """1x1 strided conv + BatchNorm used to project residual inputs to the right shape."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        # NOTE(review): modules were assigned to a local in the obfuscated
        # original; restored to the attributes forward() reads, and the
        # nonexistent `nn.Convad`/`nn.BatchNormad` fixed to the real torch names.
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        """Project `input` with a 1x1 conv and normalize it."""
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation: rescale channels by a learned global attention signal."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        # Global average pool produces one value per channel ...
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # ... which a bottleneck MLP (as 1x1 convs) squashes into (0, 1) gates.
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        """Scale each channel of `hidden_state` by its attention gate."""
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """ResNeXt-style bottleneck residual block with a grouped 3x3 convolution."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # A projection shortcut is only needed when the shape changes.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # Last conv is linear (activation=None); the block activation runs after the residual add.
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        """Residual forward: y = act(layer(x) + shortcut(x))."""
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet Y block: an X layer with a Squeeze-and-Excitation stage inserted."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # A projection shortcut is only needed when the shape changes.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # SE gate sized from the *input* width, as in the reference RegNet.
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            # Last conv is linear (activation=None); the block activation runs after the residual add.
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        """Residual forward: y = act(layer(x) + shortcut(x))."""
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A stack of X/Y layers; only the first layer changes shape (stride/width)."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        # `layer_type` selects between the plain X block and the SE-augmented Y block.
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        """Run `hidden_state` through every layer of the stage."""
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Runs the input through each `RegNetStage` in turn, optionally collecting hidden states."""

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        # Consecutive (in, out) width pairs for the remaining stages.
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        """
        Returns the last hidden state and, when requested, every intermediate
        hidden state (including the stage inputs and the final output).
        """
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for
    downloading and loading pretrained models.
    NOTE(review): the original base class was the undefined name `snake_case_`;
    `PreTrainedModel` (imported at the top of the file) is the conventional base.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming init for convolutions; unit weight / zero bias for norm layers.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the obfuscated original tested `isinstance(module, module)`;
        # the canonical check targets the encoder-bearing model class — confirm.
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
# NOTE(review): the original assigned both docstrings below to one obfuscated
# name, so the first was lost; restored to the conventional constant names.
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
# NOTE(review): the original `@add_start_docstrings(...)`/`@add_code_sample_docstrings(...)`
# decorators referenced undefined names (`snake_case_`, `__UpperCAmelCase`); they only
# attach docstrings, so they are dropped here instead of raising NameError at import.
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    """The bare RegNet model outputting raw features without any specific head on top."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        # Global average pool used to produce the pooled (per-image) representation.
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        """
        Embed `pixel_values`, encode them, and return the last hidden state plus a
        pooled output, either as a tuple or a `BaseModelOutputWithPoolingAndNoAttention`.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
# NOTE(review): the broken docstring decorators (undefined `snake_case_` etc.) are
# dropped here; they only attach documentation and raised NameError as written.
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    """
    RegNet Model with an image classification head on top (a linear layer on top of
    the pooled features), e.g. for ImageNet.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        """
        Classify `pixel_values`; when `labels` is given, also compute the loss whose
        kind (regression / single- / multi-label) follows `config.problem_type`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                # Infer the problem type once from label count/dtype, as in ResNet.
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 330 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# NOTE(review): both values below were bound to one obfuscated name, so the
# logger was overwritten even though the config class calls `logger.info`.
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class UpperCamelCase__ ( PretrainedConfig ):
    """
    Configuration for a DETA model: backbone choice, transformer encoder/decoder
    sizes, deformable-attention settings, and matcher/loss coefficients.
    NOTE(review): the original base class was the undefined name `lowerCAmelCase_`;
    `PretrainedConfig` (imported above) is the conventional base. The original
    `__init__` declared every parameter with the same name (a SyntaxError); names
    are restored from the body's attribute assignments.
    """

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,  # accepted for API compatibility; not stored here — TODO confirm
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""])
        else:
            if isinstance(backbone_config, dict):
                # A plain dict selects its config class via its `model_type` entry.
                backbone_model_type = backbone_config.pop("""model_type""")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("""If two_stage is True, with_box_refine must be True.""")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias required by `attribute_map`: heads == encoder attention heads."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by `attribute_map`: hidden size == d_model."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 116 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): both values below were bound to one obfuscated name, losing the
# logger; restored to the conventional module-level names.
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCamelCase__ ( PretrainedConfig ):
    """
    Configuration for a PEGASUS encoder-decoder model.
    NOTE(review): the original base class was the undefined name `lowerCAmelCase_`;
    `PretrainedConfig` (imported above) is the conventional base. The original
    `__init__` declared every parameter with the same name (a SyntaxError); names
    are restored from the body's attribute assignments.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        """Alias required by `attribute_map`: heads == encoder attention heads."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias required by `attribute_map`: hidden size == d_model."""
        return self.d_model
| 116 | 1 |
def naive_pattern_search(s, pattern):
    """
    Naive O(n*m) substring search.

    Args:
        s: the text to scan.
        pattern: the pattern to look for.

    Returns:
        List of every start index at which `pattern` occurs in `s`
        (empty when there is no match or `pattern` is longer than `s`).
    """
    # NOTE(review): the original declared both parameters with the same name
    # (a SyntaxError) and read an undefined `pat_len`; both are repaired, and
    # the function is renamed to the identifier the __main__ block calls.
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
    # Smoke checks: "DE" occurs once in "ABCDEFG", at index 3; then print all
    # occurrences of "ABC" in a longer text.
    assert naive_pattern_search('ABCDEFG', 'DE') == [3]
    print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 99 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# NOTE(review): the obfuscated original assigned every module-level value to the
# single name `__snake_case` while later code read `dataset`, `X`, `y`,
# `poly_reg` and `pol_reg`; the names are restored from those reads.
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial():
    """Plot the raw data (red) against the degree-4 polynomial fit (blue)."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time.
# NOTE(review): the worker function refers to this via `global process_lock`,
# so it must be bound to exactly that name (the obfuscated original was not).
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """
    Worker for parallel odd-even transposition sort: holds one value and swaps
    with its neighbors through pipes for a fixed number of rounds.
    NOTE(review): the original declared every parameter with the same name (a
    SyntaxError); names restored from the caller's argument order and body usage.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """
    Sort `arr` with parallel odd-even transposition sort: one worker process per
    element, neighbors exchanging values through pipes.
    NOTE(review): restored from the obfuscated original, which passed the input
    list itself as each Process `target`; the canonical worker is `oe_process`.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Demo entry point: print a reversed 10-element list before and after sorting."""
    # NOTE(review): renamed from the obfuscated `A` to the name the
    # `__main__` guard below actually calls.
    arr = list(range(10, 0, -1))
    print("""Initial List""")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("""Sorted List\n""")
    print(*arr)
if __name__ == "__main__":
    # Demo: sort a reversed list of 10 elements using the parallel sorter above.
    main()
| 162 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """
    Fixture holder producing the kwargs used to build an ImageGPT image processor.
    NOTE(review): the original declared every __init__ parameter with the same
    name (a SyntaxError); names restored from the attribute assignments, and the
    class renamed to the identifier the test class below instantiates.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        """Return the kwargs dict for constructing the image processor under test."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
                    [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """
    Unit tests for `ImageGPTImageProcessor` (properties, dict/JSON round-trips).
    NOTE(review): the obfuscated original used the undefined base
    `_SCREAMING_SNAKE_CASE` (the mixin is imported above) and gave every test
    method the same name `_lowercase`, so only the last one survived; canonical
    names are restored.
    """

    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """clusters"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 18, """width""": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42})

    def test_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                # numpy arrays need an element-wise comparison
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """image_processor.json""")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("""ImageGPT requires clusters at initialization""")
    def test_init_without_params(self):
        pass
def prepare_images():
    """
    Load the two fixture images used by the slow integration test below.
    NOTE(review): renamed from the obfuscated `A` to the identifier the
    integration test actually calls.
    """
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""", split="""test""")

    imagea = Image.open(dataset[4]["""file"""])
    imageb = Image.open(dataset[5]["""file"""])

    images = [imagea, imageb]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of ImageGPT image processing against known token ids."""

    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="""pt""")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1_024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="""pt""")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1_024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 162 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Compute per-example token lengths for the train/val splits under *data_dir*
    and pickle them to each dataset's ``len_file``.

    Args:
        tokenizer_name: model id or path passed to ``AutoTokenizer.from_pretrained``.
        data_dir: dataset directory consumed by ``SeqaSeqDataset``.
        max_source_length / max_target_length: truncation limits for the dataset.
        consider_target: if True, save ``max(source_len, target_len)`` per example;
            otherwise save only source lengths.
        **kwargs: forwarded to ``SeqaSeqDataset``.

    The mangled original declared every parameter as ``__lowercase`` (a
    SyntaxError) and the CLI guard calls ``save_len_file``; real names restored.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # Batched pass over the dataset; a length is the count of non-pad tokens.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
    # CLI entry point via python-fire, e.g.:
    #   python save_len_file.py <tokenizer_name> <data_dir>
    fire.Fire(save_len_file)
| 206 |
from PIL import Image
def change_brightness(img: "Image", level: float) -> "Image":
    """Return a copy of *img* with its brightness shifted by *level*.

    Args:
        img: a PIL Image.
        level: brightness delta, must be within [-255.0, 255.0].

    Raises:
        ValueError: if *level* is outside the allowed range.

    Fixes the mangled original, which declared both parameters as ``__lowercase``
    (a SyntaxError) and whose inner function read an undefined name ``c``.
    """

    def brightness(c: int) -> float:
        # Shift each channel value by `level` (128-centered form kept from original).
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100 and save the result.
        # (The mangled original assigned the result to `_UpperCamelCase` and then
        # saved an undefined `brigt_img`.)
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 206 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
    """Builds a tiny Mask2Former config plus random pixel/mask/label inputs for the
    model tests below (batch/image sizes, query and label counts, hidden dim).

    NOTE(review): this block cannot run as written — method signatures repeat the
    parameter name `__SCREAMING_SNAKE_CASE` (a SyntaxError), every local is
    assigned to the same name `__a`, and bodies read undefined names such as
    `A__`, `parent`, `pixel_values` (presumably `torch_device` and the real
    arguments before mangling). The original identifiers must be restored before
    use; the comments below describe intent only.
    """

    def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : str=32 * 8 , __SCREAMING_SNAKE_CASE : Dict=32 * 8 , __SCREAMING_SNAKE_CASE : Union[str, Any]=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=64 , ):
        # Store the tester parameters (parent test case, batch size, training flag, ...).
        __a = parent
        __a = batch_size
        __a = is_training
        __a = use_auxiliary_loss
        __a = num_queries
        __a = num_channels
        __a = min_size
        __a = max_size
        __a = num_labels
        __a = hidden_dim
        __a = hidden_dim

    # prepare_config_and_inputs: random pixel values/masks/labels on the test device.
    def _UpperCAmelCase ( self : Union[str, Any] ):
        __a = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            A__ )
        __a = torch.ones([self.batch_size, self.min_size, self.max_size] , device=A__ )
        __a = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=A__ ) > 0.5
        ).float()
        __a = (torch.rand((self.batch_size, self.num_labels) , device=A__ ) > 0.5).long()
        __a = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    # get_config: a deliberately tiny Mask2Former configuration.
    def _UpperCAmelCase ( self : List[Any] ):
        __a = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        __a = self.num_queries
        __a = self.num_labels
        __a = [1, 1, 1, 1]
        __a = self.num_channels
        __a = 64
        __a = 1_28
        __a = self.hidden_dim
        __a = self.hidden_dim
        __a = self.hidden_dim
        return config

    # prepare_config_and_inputs_for_common: config plus {"pixel_values", "pixel_mask"}.
    def _UpperCAmelCase ( self : Dict ):
        __a , __a , __a , __a , __a = self.prepare_config_and_inputs()
        __a = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    # check_output_hidden_state: hidden-state counts must match config depths/layers.
    def _UpperCAmelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] ):
        __a = output.encoder_hidden_states
        __a = output.pixel_decoder_hidden_states
        __a = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(A__ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(A__ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(A__ ) , config.decoder_layers )

    # create_and_check_maskaformer_model: forward pass of the bare model; checks the
    # transformer-decoder output shape and that the other final hidden states exist.
    def _UpperCAmelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
        with torch.no_grad():
            __a = MaskaFormerModel(config=A__ )
            model.to(A__ )
            model.eval()
            __a = model(pixel_values=A__ , pixel_mask=A__ )
            __a = model(A__ , output_hidden_states=A__ )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(A__ , A__ )

    # create_and_check_..._instance_segmentation_head_model: forward + loss of the
    # universal-segmentation head; checks mask/class logits shapes and a scalar loss.
    def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ):
        __a = MaskaFormerForUniversalSegmentation(config=A__ )
        model.to(A__ )
        model.eval()

        def comm_check_on_output(__SCREAMING_SNAKE_CASE : Optional[int] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            __a = model(pixel_values=A__ , pixel_mask=A__ )
            __a = model(A__ )
            comm_check_on_output(A__ )
            __a = model(
                pixel_values=A__ , pixel_mask=A__ , mask_labels=A__ , class_labels=A__ )
            comm_check_on_output(A__ )
            self.parent.assertTrue(result.loss is not None )
            self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A_ ( __a , __a , unittest.TestCase ):
    """Common-suite tests for Mask2Former (model + universal segmentation head).

    NOTE(review): this block cannot run as written — the base list repeats `__a`
    twice (duplicate base class; originally ModelTesterMixin and
    PipelineTesterMixin), the class attributes all share the mangled name
    `_SCREAMING_SNAKE_CASE` and shadow one another, method signatures repeat
    parameter names, and bodies read undefined names such as `A__` and
    `MaskaFormerModelTester` (the tester class above is mangled to `A_`).
    Comments below describe intent only.
    """

    _SCREAMING_SNAKE_CASE = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    _SCREAMING_SNAKE_CASE = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
    # NOTE(review): originally four distinct boolean test flags; mangled to one name.
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False

    # setUp: build the model tester and a ConfigTester.
    def _UpperCAmelCase ( self : Any ):
        __a = MaskaFormerModelTester(self )
        __a = ConfigTester(self , config_class=A__ , has_text_modality=A__ )

    def _UpperCAmelCase ( self : int ):
        self.config_tester.run_common_tests()

    # Bare-model forward check (with hidden states).
    def _UpperCAmelCase ( self : List[Any] ):
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(A__ , **A__ , output_hidden_states=A__ )

    # Segmentation-head forward/loss check.
    def _UpperCAmelCase ( self : List[Any] ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A__ )

    @unittest.skip(reason="Mask2Former does not use inputs_embeds" )
    def _UpperCAmelCase ( self : str ):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
    def _UpperCAmelCase ( self : List[Any] ):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model" )
    def _UpperCAmelCase ( self : Any ):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings" )
    def _UpperCAmelCase ( self : List[str] ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
    def _UpperCAmelCase ( self : Tuple ):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _UpperCAmelCase ( self : int ):
        pass

    # forward() signature must start with "pixel_values".
    def _UpperCAmelCase ( self : Any ):
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a = model_class(A__ )
            __a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a = [*signature.parameters.keys()]
            __a = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , A__ )

    @slow
    def _UpperCAmelCase ( self : List[str] ):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            __a = MaskaFormerModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )

    # Loss must be produced when mask/class labels are supplied.
    def _UpperCAmelCase ( self : Tuple ):
        __a = (self.model_tester.min_size,) * 2
        __a = {
            "pixel_values": torch.randn((2, 3, *size) , device=A__ ),
            "mask_labels": torch.randn((2, 10, *size) , device=A__ ),
            "class_labels": torch.zeros(2 , 10 , device=A__ ).long(),
        }
        __a = self.model_tester.get_config()
        __a = MaskaFormerForUniversalSegmentation(A__ ).to(A__ )
        __a = model(**A__ )
        self.assertTrue(outputs.loss is not None )

    def _UpperCAmelCase ( self : List[str] ):
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(A__ , **A__ , output_hidden_states=A__ )

    # Attentions must be returned when output_attentions=True.
    def _UpperCAmelCase ( self : Any ):
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a = model_class(A__ ).to(A__ )
            __a = model(**A__ , output_attentions=A__ )
            self.assertTrue(outputs.attentions is not None )

    # A full training step must backpropagate without error.
    def _UpperCAmelCase ( self : Dict ):
        if not self.model_tester.is_training:
            return
        __a = self.all_model_classes[1]
        __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
        __a = model_class(A__ )
        model.to(A__ )
        model.train()
        __a = model(A__ , mask_labels=A__ , class_labels=A__ ).loss
        loss.backward()

    # Gradients must reach encoder/pixel-decoder/transformer-decoder states and attentions.
    def _UpperCAmelCase ( self : Dict ):
        __a = self.all_model_classes[1]
        __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
        __a = True
        __a = True
        __a = model_class(A__ ).to(A__ )
        model.train()
        __a = model(A__ , mask_labels=A__ , class_labels=A__ )
        __a = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        __a = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        __a = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        __a = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=A__ )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the slow integration tests below when comparing
# model outputs against hard-coded expected tensor slices.
SCREAMING_SNAKE_CASE : float = 1e-4
def __A ( ):
"""simple docstring"""
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
    """Slow integration tests: run pretrained Mask2Former checkpoints on a real
    image and compare output slices against hard-coded expected tensors.

    NOTE(review): this block cannot run as written — every local is assigned to
    `__a`, call arguments were replaced by the undefined name `A__` (presumably
    `torch_device`, the inputs and the tolerance constant before mangling), and
    the two cached properties below were both mangled to `_UpperCAmelCase`
    although bodies read `self.model_checkpoints` / `self.default_image_processor`.
    Comments describe intent only.
    """

    @cached_property
    def _UpperCAmelCase ( self : List[str] ):
        # Originally `model_checkpoints`: the pinned checkpoint used by all tests.
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Originally `default_image_processor` (None when vision deps are missing).
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    # Bare model: check preprocessing shape and three final-hidden-state slices.
    def _UpperCAmelCase ( self : Optional[Any] ):
        __a = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A__ )
        __a = self.default_image_processor
        __a = prepare_img()
        __a = image_processor(A__ , return_tensors="pt" ).to(A__ )
        __a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(A__ , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            __a = model(**A__ )
        __a = torch.tensor(
            [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A__ )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , A__ , atol=A__ ) )
        __a = torch.tensor(
            [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A__ )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , A__ , atol=A__ ) )
        __a = torch.tensor(
            [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A__ )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , A__ , atol=A__ ) )

    # Segmentation head: check mask and class logits shapes plus expected slices.
    def _UpperCAmelCase ( self : Optional[Any] ):
        __a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A__ ).eval()
        __a = self.default_image_processor
        __a = prepare_img()
        __a = image_processor(A__ , return_tensors="pt" ).to(A__ )
        __a = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(A__ , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            __a = model(**A__ )
        # masks_queries_logits
        __a = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        __a = [
            [-8.78_39, -9.00_56, -8.81_21],
            [-7.41_04, -7.03_13, -6.54_01],
            [-6.61_05, -6.34_27, -6.46_75],
        ]
        __a = torch.tensor(A__ ).to(A__ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , A__ , atol=A__ ) )
        # class_queries_logits
        __a = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        __a = torch.tensor(
            [
                [1.83_24, -8.08_35, -4.19_22],
                [0.84_50, -9.00_50, -3.60_53],
                [0.30_45, -7.72_93, -3.02_75],
            ] ).to(A__ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , A__ , atol=A__ ) )

    # Batched inputs with segmentation maps must produce a loss.
    def _UpperCAmelCase ( self : Any ):
        __a = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A__ ).eval()
        __a = self.default_image_processor
        __a = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
        __a = inputs["pixel_values"].to(A__ )
        __a = [el.to(A__ ) for el in inputs["mask_labels"]]
        __a = [el.to(A__ ) for el in inputs["class_labels"]]
        with torch.no_grad():
            __a = model(**A__ )
        self.assertTrue(outputs.loss is not None )
| 700 | from datetime import datetime as dt
import os
from github import Github
SCREAMING_SNAKE_CASE : Optional[Any] = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def __A ( ):
"""simple docstring"""
__a = Github(os.environ["GITHUB_TOKEN"] )
__a = g.get_repo("huggingface/transformers" )
__a = repo.get_issues(state="open" )
for issue in open_issues:
__a = sorted([comment for comment in issue.get_comments()] , key=lambda _A : i.created_at , reverse=_A )
__a = comments[0] if len(_A ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
    # Entry point for the scheduled stale-issue CI job: run one sweep.
    main()
| 525 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _UpperCamelCase(PretrainedConfig):
    """Configuration class for RoFormer models.

    Stores vocabulary/embedding/attention hyper-parameters. Fixes the mangled
    original, which inherited from ``__A`` (not a class; should be
    ``PretrainedConfig``, imported above) and declared every ``__init__``
    parameter as ``a`` — a SyntaxError.
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Embedding size defaults to the hidden size when not given explicitly.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class _UpperCamelCase(OnnxConfig):
    """ONNX export configuration for RoFormer.

    Fixes the mangled original, which inherited from ``__A`` (should be
    ``OnnxConfig``, imported above) and assigned the axis dicts to
    ``SCREAMING_SNAKE_CASE`` while returning the undefined ``dynamic_axis``.
    NOTE(review): this definition shadows the config class of the same mangled
    name above; the two originally had distinct names.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : List[Any] = logging.get_logger(__name__)
UpperCamelCase_ : Tuple = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _a(PretrainedConfig):
    """Configuration class for RWKV models.

    Fixes the mangled original, which inherited from the undefined
    ``__lowerCAmelCase`` (should be ``PretrainedConfig``, imported above) and
    declared every ``__init__`` parameter as ``_SCREAMING_SNAKE_CASE`` — a
    SyntaxError.
    """

    model_type = "rwkv"
    # HF alias: `max_position_embeddings` maps onto `context_length`.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Attention/FFN sizes default relative to the hidden size when unset.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 185 | 0 |
import argparse
from collections import defaultdict
def lowercase__(A , A , A , A , A ) ->Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any]= f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(_lowerCamelCase , "r" ) as f:
lowercase__ : Union[str, Any]= f.readlines()
lowercase__ : List[Any]= f'''class {class_name}('''
lowercase__ : Union[str, Any]= f'''{4 * ' '}def {test_name}('''
lowercase__ : Optional[int]= f'''{8 * ' '}{correct_line.split()[0]}'''
lowercase__ : str= f'''{16 * ' '}{correct_line.split()[0]}'''
lowercase__ : int= False
lowercase__ : str= False
lowercase__ : List[Any]= False
lowercase__ : List[str]= False
lowercase__ : Tuple= 0
lowercase__ : str= 0
lowercase__ : List[Any]= []
for line in lines:
if line.startswith(_lowerCamelCase ):
lowercase__ : Optional[Any]= True
elif in_class and line.startswith(_lowerCamelCase ):
lowercase__ : List[Any]= True
elif in_class and in_func and (line.startswith(_lowerCamelCase ) or line.startswith(_lowerCamelCase )):
lowercase__ : Any= len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowercase__ : Any= True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowercase__ : Optional[int]= True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * ' '}{correct_line}''' )
lowercase__ : int= False
else:
new_lines.append(_lowerCamelCase )
with open(_lowerCamelCase , "w" ) as f:
for line in new_lines:
f.write(_lowerCamelCase )
def main(correct, fail=None):
    """Apply every correction listed in *correct* — one semicolon-separated
    ``file;class;test;correct_line`` record per line. If *fail* is given, only
    apply corrections whose ``file::class::test`` id appears in that failure list.

    Fixes the mangled original, which declared both parameters as ``A`` (a
    SyntaxError) and shadowed the function above instead of calling it.
    """
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    # Per-test-id counter so repeated records patch successive occurrences.
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # Parse the two file arguments and run the overwrite pass.
    # (The mangled original assigned the parser and args to `a` and then read
    # the undefined names `parser`/`args`.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 705 |
"""simple docstring"""
def harmonic_series(n_term) -> list:
    """Return the first *n_term* terms of the harmonic series as strings.

    An empty-string input yields an empty list; otherwise *n_term* is interpreted
    as an integer count, e.g. ``3 -> ["1", "1/2", "1/3"]``.

    Restores the name called by the guard below and fixes the mangled original,
    whose parameter was ``A`` while the body read ``n_term``/``series``.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # First term is written "1"; every later term is "1/k".
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    # Interactive demo: read n from stdin and print the series.
    # (The mangled original assigned the input to `a` and then read the
    # undefined name `nth_term`.)
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 85 | 0 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Return a uniformly random element of *lst* to use as the pivot."""
    return choice(lst)


def kth_number(lst, k):
    """Return the k-th smallest element (1-based) of *lst* via quickselect.

    Assumes the elements of *lst* are distinct; expected linear time. Fixes the
    mangled original, which declared both parameters with the same name (a
    SyntaxError) and recursed into names that no longer existed.
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    smaller = [e for e in lst if e < pivot]
    bigger = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(smaller) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(smaller) < k - 1:
        return kth_number(bigger, k - len(smaller) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(smaller, k)
if __name__ == "__main__":
    # Self-check: run any doctests defined in this module.
    import doctest

    doctest.testmod()
| 579 |
import requests
# Both constants were mangled to the same name `SCREAMING_SNAKE_CASE` (the second
# shadowing the first) while the functions below read APPID and URL_BASE.
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Fetch the current weather for location *q* from OpenWeatherMap.

    Parameter names matter: ``params=locals()`` sends them as the query string,
    so the mangled duplicate ``__SCREAMING_SNAKE_CASE`` names (also a SyntaxError)
    are restored to the API's ``q``/``appid`` keys.
    """
    return requests.get(URL_BASE + "weather", params=locals()).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Fetch the weather forecast for location *q* from OpenWeatherMap.

    ``params=locals()`` sends the parameter names as query keys, so they must be
    the API's ``q``/``appid`` (the mangled version duplicated one name — a
    SyntaxError — and would have sent wrong keys).
    """
    return requests.get(URL_BASE + "forecast", params=locals()).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Fetch the OpenWeatherMap "onecall" report for the given coordinates.

    ``params=locals()`` sends the parameter names as query keys, so they must be
    the API's ``lat``/``lon``/``appid`` (the mangled version duplicated one name —
    a SyntaxError — and would have sent wrong keys).
    """
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
    from pprint import pprint

    # Interactive loop: print current weather for each entered location; an empty
    # input exits. (The mangled original assigned the input to
    # `SCREAMING_SNAKE_CASE` and then read the undefined name `location`.)
    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
| 579 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SCREAMING_SNAKE_CASE__(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer assembled from `tokenizers` components.

    Uses fixed special-token ids <pad>=0, </s>=1, <unk>=2; text is NMT/NFKC
    normalized, whitespace-collapsed and lower-cased, then pre-tokenized on
    metaspace, individual digits and punctuation, and every single sequence gets
    the EOS token appended. Reconstructed from the mangled original, whose
    ``__init__`` declared every parameter as ``__lowerCamelCase`` (a SyntaxError)
    and whose attribute assignments were flattened to ``A__ = ...``.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        # Fixed ids; the list below is ordered by id for the trainer.
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),  # collapse runs of spaces
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Always append the EOS token to a single sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model on one or more text files on disk."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so the Unigram ``unk_id`` points at <unk>."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 701 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def UpperCamelCase ( *__lowerCamelCase,**__lowerCamelCase ):
pass
def UpperCamelCase__(UpperCamelCase__: Any) -> List[str]:
    """Stub used by the pipeline tests: ignore the argument and yield no result.

    Always returns None, whatever is passed in.
    """
    result = None
    return result
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# (Used as the default invoice image for the pipeline tests below.)
a__: str = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Pipeline tests for the ``document-question-answering`` task.

    NOTE(review): this block has been machine-mangled — every local assignment target was
    rewritten to ``A__`` while later statements still read the intended names
    (``dqa_pipeline``, ``image``, ``question``, ``word_boxes``, ``examples``, ``outputs``,
    ``expected_output``, ``tokenizer``), all test methods share the name ``UpperCamelCase``
    (later defs shadow earlier ones), and some signatures repeat the parameter name
    ``__lowerCamelCase`` (a SyntaxError).  ``require_detectrona`` is also not among the
    visible imports (presumably a mangled ``require_detectron2`` — confirm against the
    file header).  The comments below describe the evident intent; the names must be
    restored before this code can run.
    """

    __SCREAMING_SNAKE_CASE = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    # get_test_pipeline-style hook: builds a document-QA pipeline plus three example
    # inputs (image URL, loaded image, and image with pre-computed OCR word boxes).
    @require_pytesseract
    @require_vision
    def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
        A__ = pipeline(
            '''document-question-answering''',model=__lowerCamelCase,tokenizer=__lowerCamelCase,image_processor=__lowerCamelCase )
        A__ = INVOICE_URL
        # OCR the invoice once so one example can bypass the pipeline's own tesseract pass.
        A__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ),__lowerCamelCase,'''''' ) ) )
        A__ = '''What is the placebo?'''
        A__ = [
            {
                '''image''': load_image(__lowerCamelCase ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples

    # run_pipeline_test-style hook: checks only the *shape* of the answers
    # (score/answer/start/end per candidate), not specific values.
    def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
        A__ = dqa_pipeline(__lowerCamelCase,top_k=2 )
        self.assertEqual(
            __lowerCamelCase,[
                [
                    {'''score''': ANY(__lowerCamelCase ), '''answer''': ANY(__lowerCamelCase ), '''start''': ANY(__lowerCamelCase ), '''end''': ANY(__lowerCamelCase )},
                    {'''score''': ANY(__lowerCamelCase ), '''answer''': ANY(__lowerCamelCase ), '''start''': ANY(__lowerCamelCase ), '''end''': ANY(__lowerCamelCase )},
                ]
            ]
            * 3,)

    # Tiny random LayoutLMv2 checkpoint: exercises the full code path end-to-end,
    # including the no-text image and the explicit words/boxes input.
    @require_torch
    @require_detectrona
    @require_pytesseract
    def UpperCamelCase ( self ):
        A__ = pipeline('''document-question-answering''',model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        A__ = INVOICE_URL
        A__ = '''How many cats are there?'''
        A__ = [
            {'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
        self.assertEqual(nested_simplify(__lowerCamelCase,decimals=4 ),__lowerCamelCase )
        A__ = dqa_pipeline({'''image''': image, '''question''': question},top_k=2 )
        self.assertEqual(nested_simplify(__lowerCamelCase,decimals=4 ),__lowerCamelCase )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        A__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
        self.assertEqual(__lowerCamelCase,[] )
        # We can optionally pass directly the words and bounding boxes
        A__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        A__ = []
        A__ = []
        A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,words=__lowerCamelCase,boxes=__lowerCamelCase,top_k=2 )
        self.assertEqual(__lowerCamelCase,[] )

    # Slow integration test: fine-tuned LayoutLMv2 DocVQA checkpoint, pinned revision;
    # checks exact top-2 answers for single, dict, and batched invocations.
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def UpperCamelCase ( self ):
        A__ = pipeline(
            '''document-question-answering''',model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''',revision='''9977165''',)
        A__ = INVOICE_URL
        A__ = '''What is the invoice number?'''
        A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ],)
        A__ = dqa_pipeline({'''image''': image, '''question''': question},top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ],)
        A__ = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}],top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                [
                    {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2,)

    # Same checkpoint with max_seq_len=50 so the document is chunked; best answers differ.
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def UpperCamelCase ( self ):
        A__ = pipeline(
            '''document-question-answering''',model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''',revision='''9977165''',max_seq_len=50,)
        A__ = INVOICE_URL
        A__ = '''What is the invoice number?'''
        A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ],)
        A__ = dqa_pipeline({'''image''': image, '''question''': question},top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ],)
        A__ = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}],top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                [
                    {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2,)

    # LayoutLM (v1) DocVQA checkpoint; also verifies the image=None + word_boxes path.
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def UpperCamelCase ( self ):
        A__ = AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''',revision='''3dc6de3''',add_prefix_space=__lowerCamelCase )
        A__ = pipeline(
            '''document-question-answering''',model='''impira/layoutlm-document-qa''',tokenizer=__lowerCamelCase,revision='''3dc6de3''',)
        A__ = INVOICE_URL
        A__ = '''What is the invoice number?'''
        A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ],)
        A__ = dqa_pipeline({'''image''': image, '''question''': question},top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ],)
        A__ = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}],top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                [
                    {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2,)
        A__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ),__lowerCamelCase,'''''' ) ) )
        # This model should also work if `image` is set to None
        A__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question},top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ],)

    # Same LayoutLM checkpoint with max_seq_len=50 (chunked inference).
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def UpperCamelCase ( self ):
        A__ = AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''',revision='''3dc6de3''',add_prefix_space=__lowerCamelCase )
        A__ = pipeline(
            '''document-question-answering''',model='''impira/layoutlm-document-qa''',tokenizer=__lowerCamelCase,revision='''3dc6de3''',max_seq_len=50,)
        A__ = INVOICE_URL
        A__ = '''What is the invoice number?'''
        A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ],)
        A__ = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}],top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                [
                    {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2,)
        A__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ),__lowerCamelCase,'''''' ) ) )
        # This model should also work if `image` is set to None
        A__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question},top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ],)

    # Donut (OCR-free, generative) checkpoint: answers have no score/start/end.
    @slow
    @require_torch
    def UpperCamelCase ( self ):
        A__ = pipeline(
            '''document-question-answering''',model='''naver-clova-ix/donut-base-finetuned-docvqa''',tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ),feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''',)
        A__ = INVOICE_URL
        A__ = '''What is the invoice number?'''
        A__ = dqa_pipeline(image=__lowerCamelCase,question=__lowerCamelCase,top_k=2 )
        self.assertEqual(nested_simplify(__lowerCamelCase,decimals=4 ),[{'''answer''': '''us-001'''}] )

    # The task has no TensorFlow implementation, so the TF variant is skipped outright.
    @require_tf
    @unittest.skip('''Document question answering not implemented in TF''' )
    def UpperCamelCase ( self ):
        pass
| 212 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and synthetic image batches for the ChineseCLIP tests.

    NOTE(review): reconstructed from a machine-mangled block in which every parameter was
    renamed ``_snake_case`` (duplicate argument names — a SyntaxError), every ``self.<attr>``
    assignment target was collapsed to ``A__``, both methods were named ``_a`` (the second
    shadowed the first), the class name was mangled, and the dtype was the nonexistent
    ``np.uinta``.  Names are restored from the attributes read below and from the keyword
    arguments the sibling test classes pass (``ChineseCLIPImageProcessingTester(self,
    do_center_crop=...)``, ``num_channels=4``, ``.prepare_image_processor_dict()``,
    ``.prepare_inputs(...)``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
    ):
        # Fall back to the ChineseCLIP defaults when the caller does not override.
        self.size = size if size is not None else {"height": 224, "width": 224}
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_center_crop = do_center_crop
        self.do_normalize = do_normalize
        # CLIP-style normalization statistics (None defaults avoid shared mutable lists).
        self.image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        self.image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random test images.

        Returns PIL images by default, numpy arrays if ``numpify``, torch tensors if
        ``torchify``; the two flags are mutually exclusive.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                # Random per-image resolution within [min_resolution, max_resolution).
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
    """``ChineseCLIPImageProcessor`` tests for standard 3-channel inputs.

    NOTE(review): machine-mangled block — the mixin base ``UpperCAmelCase_`` is not defined
    in the visible imports (the file imports ``ImageProcessingSavingTestMixin``; presumably
    that is what was meant), every test method shares the name ``_a`` (later defs shadow
    earlier ones), assignment targets were collapsed to ``A__`` while later lines read the
    intended names (``image_processor``, ``image_inputs``, ``encoded_images``), and bodies
    reference an undefined ``_snake_case``.  Comments describe the evident intent.
    """

    A__ : Dict = ChineseCLIPImageProcessor if is_vision_available() else None

    def _a ( self : Optional[int] ):
        """setUp: attach a tester configured with center-cropping enabled."""
        A__ = ChineseCLIPImageProcessingTester(self , do_center_crop=_snake_case )

    @property
    def _a ( self : str ):
        """The kwargs dict used to instantiate the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _a ( self : Optional[int] ):
        """The processor exposes every configuration attribute."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
        self.assertTrue(hasattr(_snake_case , 'size' ) )
        self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
        self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
        self.assertTrue(hasattr(_snake_case , 'do_normalize' ) )
        self.assertTrue(hasattr(_snake_case , 'image_mean' ) )
        self.assertTrue(hasattr(_snake_case , 'image_std' ) )
        self.assertTrue(hasattr(_snake_case , 'do_convert_rgb' ) )

    def _a ( self : Any ):
        """``from_dict`` honors both the defaults and explicit size/crop_size overrides."""
        A__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 2_24, 'width': 2_24} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )

    def _a ( self : Tuple ):
        """Intentionally empty placeholder (kept to mirror the common test layout)."""
        pass

    def _a ( self : int ):
        """Processing PIL images yields crop-sized pixel_values, batched and unbatched."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A__ = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case )
        for image in image_inputs:
            self.assertIsInstance(_snake_case , Image.Image )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        A__ = image_processing(_snake_case , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def _a ( self : Tuple ):
        """Same as the PIL test, but feeding numpy arrays."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A__ = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case , numpify=_snake_case )
        for image in image_inputs:
            self.assertIsInstance(_snake_case , np.ndarray )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        A__ = image_processing(_snake_case , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def _a ( self : Optional[int] ):
        """Same as the PIL test, but feeding torch tensors."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A__ = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case , torchify=_snake_case )
        for image in image_inputs:
            self.assertIsInstance(_snake_case , torch.Tensor )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        A__ = image_processing(_snake_case , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
    """``ChineseCLIPImageProcessor`` tests for 4-channel (e.g. RGBA) inputs, which should be
    converted down to 3 output channels.

    NOTE(review): machine-mangled block — same issues as the 3-channel test class above:
    the mixin base ``UpperCAmelCase_`` is undefined in the visible imports, methods all
    share the name ``_a``, and assignment targets were collapsed to ``A__`` (the second
    setUp assignment evidently set ``expected_encoded_image_num_channels = 3``, read by
    the last test).
    """

    A__ : Dict = ChineseCLIPImageProcessor if is_vision_available() else None

    def _a ( self : Optional[int] ):
        """setUp: tester produces 4-channel images; the processor should emit 3 channels."""
        A__ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_snake_case )
        A__ = 3

    @property
    def _a ( self : Optional[int] ):
        """The kwargs dict used to instantiate the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _a ( self : List[str] ):
        """The processor exposes every configuration attribute."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_snake_case , 'do_resize' ) )
        self.assertTrue(hasattr(_snake_case , 'size' ) )
        self.assertTrue(hasattr(_snake_case , 'do_center_crop' ) )
        self.assertTrue(hasattr(_snake_case , 'center_crop' ) )
        self.assertTrue(hasattr(_snake_case , 'do_normalize' ) )
        self.assertTrue(hasattr(_snake_case , 'image_mean' ) )
        self.assertTrue(hasattr(_snake_case , 'image_std' ) )
        self.assertTrue(hasattr(_snake_case , 'do_convert_rgb' ) )

    def _a ( self : str ):
        """Intentionally empty placeholder (kept to mirror the common test layout)."""
        pass

    def _a ( self : str ):
        """4-channel PIL inputs come out with the expected (3) output channel count."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A__ = self.image_processor_tester.prepare_inputs(equal_resolution=_snake_case )
        for image in image_inputs:
            self.assertIsInstance(_snake_case , Image.Image )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        A__ = image_processing(_snake_case , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
"""simple docstring"""
import os
def lowerCAmelCase_ (num_file: str = "") -> str:
    """Project Euler 13: return the first ten digits of the sum of the numbers in ``num.txt``.

    ``num_file`` optionally overrides the input path (handy for testing); by default the
    ``num.txt`` shipped next to this module is used.  Each line holds one integer; Python
    ints are arbitrary precision, so a plain ``sum`` is exact.

    NOTE(review): the original read ``os.path.dirname(_SCREAMING_SNAKE_CASE)`` and summed
    ``int(_SCREAMING_SNAKE_CASE)`` — both undefined names (mangled ``__file__`` and
    ``line``) — annotated an undefined ``List[str]`` return, and the ``__main__`` guard
    called a nonexistent ``solution()``.
    """
    a_ = num_file or os.path.join(os.path.dirname(__file__), "num.txt")
    with open(a_) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(lowerCAmelCase_())
| 473 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (pretrained config archive map).
# NOTE(review): mangled — this dict reuses the logger's name ``SCREAMING_SNAKE_CASE_`` and
# therefore clobbers it; originally these were two distinct names (the module logger and
# the archive-map constant).
SCREAMING_SNAKE_CASE_ = {
    '''snap-research/efficientformer-l1-300''': (
        '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
    ),
}
class lowerCAmelCase_ ( PretrainedConfig ):
    r"""Configuration class for EfficientFormer models (stage depths/widths, attention
    geometry, downsampling, stochastic depth, distillation and layer-scale options).

    NOTE(review): reconstructed from a machine-mangled block whose base class was the
    undefined name ``A__`` (``PretrainedConfig`` is what this module imports), whose
    parameters were ALL renamed ``snake_case_`` (duplicate argument names — a SyntaxError),
    and whose ``self.<attr>`` assignment targets were collapsed to ``__lowerCAmelCase``.
    Parameter names and order were restored by matching default values and assignment order
    against the upstream ``EfficientFormerConfig``; confirm against the canonical
    transformers source.
    """

    # Key used by the auto-config machinery; the mangled block stored this string under
    # the meaningless attribute name ``_snake_case``.
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 701 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class lowerCAmelCase_ ( A__ ):
    """SAM-style mask-generation chunk pipeline: embeds the image once, runs point prompts
    in batches (chunks), then filters and deduplicates the predicted masks.

    NOTE(review): machine-mangled block — the decorator argument and base class are the
    undefined name ``A__`` (presumably ``PIPELINE_INIT_ARGS`` and ``ChunkPipeline`` from
    the imports above), the private hook methods all share the name ``A__`` (later defs
    shadow earlier ones), every local assignment target was collapsed to
    ``__lowerCAmelCase`` while later lines still read the intended names (``kwargs``,
    ``model_inputs``, ``grid_points``, ``masks``, ...), and several signatures repeat the
    parameter name ``snake_case_`` (a SyntaxError).  Comments describe the evident intent.
    """

    def __init__( self , **snake_case_ ) -> Optional[Any]:
        super().__init__(**snake_case_ )
        # Hard requirements: image handling needs vision, and the model path is PT-only.
        requires_backends(self , """vision""" )
        requires_backends(self , """torch""" )
        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        self.check_model_type(snake_case_ )

    # _sanitize_parameters-style hook: split user kwargs into the three per-stage dicts
    # (preprocess / forward / postprocess) that the pipeline base class expects.
    def A__ ( self , **snake_case_ ) -> Optional[Any]:
        __lowerCAmelCase = {}
        __lowerCAmelCase = {}
        __lowerCAmelCase = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            __lowerCAmelCase = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            __lowerCAmelCase = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            __lowerCAmelCase = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            __lowerCAmelCase = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            __lowerCAmelCase = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            __lowerCAmelCase = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            __lowerCAmelCase = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            __lowerCAmelCase = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            __lowerCAmelCase = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            __lowerCAmelCase = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            __lowerCAmelCase = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            __lowerCAmelCase = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    # Thin wrapper forwarding to the base class; the extra keyword-only knobs appear to be
    # num_workers / batch_size (names mangled — confirm against the base pipeline).
    def __call__( self , snake_case_ , *snake_case_ , snake_case_=None , snake_case_=None , **snake_case_ ) -> Union[str, Any]:
        return super().__call__(snake_case_ , *snake_case_ , num_workers=snake_case_ , batch_size=snake_case_ , **snake_case_ )

    # preprocess: load the image, build crop boxes and the point grid, embed the image
    # ONCE, then yield one model-input chunk per slice of `points_per_batch` points.
    def A__ ( self , snake_case_ , snake_case_=64 , snake_case_ = 0 , snake_case_ = 512 / 1_500 , snake_case_ = 32 , snake_case_ = 1 , ) -> Optional[int]:
        __lowerCAmelCase = load_image(snake_case_ )
        __lowerCAmelCase = self.image_processor.size["""longest_edge"""]
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.generate_crop_boxes(
            snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        __lowerCAmelCase = self.image_processor(images=snake_case_ , return_tensors="""pt""" )
        with self.device_placement():
            if self.framework == "pt":
                __lowerCAmelCase = self.get_inference_context()
                with inference_context():
                    # Compute image embeddings once and reuse them for every point chunk.
                    __lowerCAmelCase = self._ensure_tensor_on_device(snake_case_ , device=self.device )
                    __lowerCAmelCase = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    __lowerCAmelCase = image_embeddings
        __lowerCAmelCase = grid_points.shape[1]
        __lowerCAmelCase = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )
        for i in range(0 , snake_case_ , snake_case_ ):
            __lowerCAmelCase = grid_points[:, i : i + points_per_batch, :, :]
            __lowerCAmelCase = input_labels[:, i : i + points_per_batch]
            # ``is_last`` lets the chunk pipeline know when to trigger postprocessing.
            __lowerCAmelCase = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    # _forward: run the model on one chunk and immediately resize/filter the masks on
    # device, so we never copy ALL raw masks back to CPU.
    def A__ ( self , snake_case_ , snake_case_=0.88 , snake_case_=0.95 , snake_case_=0 , snake_case_=1 , ) -> Dict:
        __lowerCAmelCase = model_inputs.pop("""input_boxes""" )
        __lowerCAmelCase = model_inputs.pop("""is_last""" )
        __lowerCAmelCase = model_inputs.pop("""original_sizes""" ).tolist()
        __lowerCAmelCase = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
        __lowerCAmelCase = self.model(**snake_case_ )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        __lowerCAmelCase = model_outputs["""pred_masks"""]
        __lowerCAmelCase = self.image_processor.post_process_masks(
            snake_case_ , snake_case_ , snake_case_ , snake_case_ , binarize=snake_case_ )
        __lowerCAmelCase = model_outputs["""iou_scores"""]
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    # postprocess: concatenate all chunk outputs, NMS-deduplicate masks, and optionally
    # attach RLE masks / bounding boxes to the result dict.
    def A__ ( self , snake_case_ , snake_case_=False , snake_case_=False , snake_case_=0.7 , ) -> str:
        __lowerCAmelCase = []
        __lowerCAmelCase = []
        __lowerCAmelCase = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )
        __lowerCAmelCase = torch.cat(snake_case_ )
        __lowerCAmelCase = torch.cat(snake_case_ )
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.image_processor.post_process_for_mask_generation(
            snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # Collect any remaining per-chunk keys so they are preserved in the output.
        __lowerCAmelCase = defaultdict(snake_case_ )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(snake_case_ )
        __lowerCAmelCase = {}
        if output_rle_mask:
            __lowerCAmelCase = rle_mask
        if output_bboxes_mask:
            __lowerCAmelCase = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 573 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowercase ( _UpperCamelCase ):
    """Config-tester subclass: checks that SegformerConfig exposes the attributes the model
    code relies on.

    NOTE(review): mangled — the base ``_UpperCamelCase`` is undefined in the visible
    imports (presumably ``ConfigTester``), the assignment target was collapsed to ``__a``,
    and the hasattr calls read an undefined ``_UpperCAmelCase`` (evidently the config just
    built on the first line).
    """

    def _lowerCamelCase ( self ):
        __a : Union[str, Any] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(_UpperCAmelCase , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(_UpperCAmelCase , '''num_attention_heads''' ) )
        self.parent.assertTrue(hasattr(_UpperCAmelCase , '''num_encoder_blocks''' ) )
class __lowercase :
    """Model tester for Segformer: builds small configs and dummy inputs, and provides the
    create-and-check helpers the test class below dispatches to.

    NOTE(review): machine-mangled — constructor parameters were all renamed
    ``_UpperCAmelCase`` (duplicate argument names — a SyntaxError), every ``self.<attr>``
    assignment target was collapsed to ``__a``, the check methods all share the name
    ``_lowerCamelCase``, and bodies read names (``parent``, ``model``, ``result``,
    ``config_and_inputs``...) whose bindings were destroyed.  Parameter identities can be
    recovered from the attributes read below (batch_size=13, image_size=64, num_channels=3,
    num_encoder_blocks=4, depths, sr_ratios, hidden_sizes, downsampling_rates,
    num_attention_heads, is_training, use_labels, hidden_act, dropout probs,
    initializer_range, num_labels, scope).
    """

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[2, 2, 2, 2] , _UpperCAmelCase=[8, 4, 2, 1] , _UpperCAmelCase=[16, 32, 64, 128] , _UpperCAmelCase=[1, 4, 8, 16] , _UpperCAmelCase=[1, 2, 4, 8] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
        __a : Any = parent
        __a : Optional[int] = batch_size
        __a : Optional[int] = image_size
        __a : List[str] = num_channels
        __a : List[str] = num_encoder_blocks
        __a : int = sr_ratios
        __a : str = depths
        __a : Any = hidden_sizes
        __a : Optional[int] = downsampling_rates
        __a : List[Any] = num_attention_heads
        __a : Optional[Any] = is_training
        __a : int = use_labels
        __a : List[Any] = hidden_act
        __a : Any = hidden_dropout_prob
        __a : Optional[Any] = attention_probs_dropout_prob
        __a : Optional[int] = initializer_range
        __a : Any = num_labels
        __a : str = scope

    # prepare_config_and_inputs: random pixel_values, optional per-pixel labels, config.
    def _lowerCamelCase ( self ):
        __a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : int = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels

    # get_config: a small SegformerConfig assembled from the tester's attributes.
    def _lowerCamelCase ( self ):
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    # create_and_check_model: base model's last_hidden_state has the expected shape
    # (spatially reduced by the final downsampling rate times 2).
    def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __a : Optional[Any] = SegformerModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __a : Any = model(_UpperCAmelCase )
        __a : Tuple = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    # Semantic-segmentation head: logits at 1/4 resolution; with labels the loss is > 0.
    def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __a : Union[str, Any] = self.num_labels
        __a : int = SegformerForSemanticSegmentation(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __a : int = model(_UpperCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        __a : Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    # Binary (single-label) segmentation: random 0/1 label map, loss must be positive.
    def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __a : List[str] = 1
        __a : Optional[Any] = SegformerForSemanticSegmentation(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __a : int = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_UpperCAmelCase )
        __a : str = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertGreater(result.loss , 0.0 )

    # prepare_config_and_inputs_for_common: repackage inputs as the shared-test dict form.
    def _lowerCamelCase ( self ):
        __a : str = self.prepare_config_and_inputs()
        __a , __a , __a : str = config_and_inputs
        __a : str = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Segformer model test class (class body continues beyond this view).

    NOTE(review): machine-mangled — both mixin bases appear as the undefined name
    ``_UpperCamelCase`` (the imports above suggest ``ModelTesterMixin`` and
    ``PipelineTesterMixin``), and all five class attributes below were renamed to the same
    identifier ``__lowerCAmelCase``, so each assignment clobbers the previous one.  They
    evidently correspond to the standard test-mixin knobs (``all_model_classes``,
    ``pipeline_model_mapping``, and three boolean feature flags); confirm against the
    upstream test file before running.
    """

    # Models under test (empty tuple when torch is unavailable).
    __lowerCAmelCase = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by the pipeline test mixin.
    __lowerCAmelCase = (
        {
            '''feature-extraction''': SegformerModel,
            '''image-classification''': SegformerForImageClassification,
            '''image-segmentation''': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Boolean feature flags for the shared test mixin (original names lost in mangling).
    __lowerCAmelCase = True
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    def _lowerCamelCase ( self ):
        # setUp: build the model tester and the config tester used by the cases below.
        # NOTE(review): assignment targets were mangled to ``__a`` — later tests read
        # ``self.model_tester`` / ``self.config_tester``, so those are the intended names.
        __a : List[Any] = SegformerModelTester(self )
        __a : Dict = SegformerConfigTester(self , config_class=_UpperCAmelCase )
    def _lowerCamelCase ( self ):
        # Runs the shared config serialization/round-trip checks.
        self.config_tester.run_common_tests()
    def _lowerCamelCase ( self ):
        # Base-model forward pass: shape check via the model tester.
        __a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )
    def _lowerCamelCase ( self ):
        # Binary (single-label) segmentation head: loss must be positive.
        __a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*_UpperCAmelCase )
    def _lowerCamelCase ( self ):
        # Multi-label semantic segmentation head: logits shape and loss checks.
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*_UpperCAmelCase )
    @unittest.skip('''SegFormer does not use inputs_embeds''' )
    def _lowerCamelCase ( self ):
        # Intentionally skipped: the architecture consumes pixel_values, not inputs_embeds.
        pass
    @unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
    def _lowerCamelCase ( self ):
        # Intentionally skipped: no token-embedding accessors on this vision model.
        pass
    def _lowerCamelCase ( self ):
        # Every model's forward signature must start with ``pixel_values``.
        # NOTE(review): assignment targets mangled to ``__a`` — the loop body reads
        # ``model``, ``signature``, ``arg_names`` and an ``expected_arg_names`` list.
        __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = model_class(_UpperCAmelCase )
            __a : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : Tuple = [*signature.parameters.keys()]
            __a : Optional[Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Tuple = True
for model_class in self.all_model_classes:
__a : List[str] = True
__a : Any = False
__a : str = True
__a : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : int = outputs.attentions
__a : int = sum(self.model_tester.depths )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Tuple = True
__a : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : Dict = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
__a : int = (self.model_tester.image_size // 4) ** 2
__a : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__a : int = (self.model_tester.image_size // 32) ** 2
__a : Dict = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__a : int = len(_UpperCAmelCase )
# Check attention is always last and order is fine
__a : Union[str, Any] = True
__a : Tuple = True
__a : List[str] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Tuple = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCAmelCase ) )
__a : Tuple = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
__a : Optional[Any] = (self.model_tester.image_size // 4) ** 2
__a : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowerCamelCase ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : Optional[Any] = outputs.hidden_states
__a : Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ):
continue
__a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
__a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCamelCase ( self ):
pass
@slow
def _lowerCamelCase ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Dict = SegformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __A ( ) -> Optional[int]:
    """Return the COCO test-fixture image (two cats) used by the slow integration tests."""
    # `Image` comes from PIL, imported at module level.
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
@require_torch
class __lowercase ( unittest.TestCase ):
    """Slow integration tests that run real SegFormer checkpoints on a COCO fixture image.

    NOTE(review): the processor keyword values and device arguments were
    scrambled to the undefined name ``_UpperCAmelCase``; upstream they are
    presumably ``False`` for keep_ratio/align/do_random_crop and the torch
    device / expected tensors elsewhere — confirm before running.
    """

    @slow
    def _lowerCamelCase ( self ):
        # b0 ADE20k checkpoint: check logits shape and a pinned 3x3x3 slice.
        # only resize + normalize
        __a : List[str] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
        __a : List[Any] = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
            _UpperCAmelCase )
        __a : Union[str, Any] = prepare_img()
        __a : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
        __a : Optional[Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
        with torch.no_grad():
            __a : Dict = model(_UpperCAmelCase )
        # logits are produced at 128x128 (1/4 of the 512 input side)
        __a : int = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
        __a : List[str] = torch.tensor(
            [
                [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
                [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
                [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
            ] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )

    @slow
    def _lowerCamelCase ( self ):
        # b1 Cityscapes checkpoint: same shape/slice check with a looser tolerance.
        # only resize + normalize
        __a : Optional[Any] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
        __a : str = SegformerForSemanticSegmentation.from_pretrained(
            '''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(_UpperCAmelCase )
        __a : List[str] = prepare_img()
        __a : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
        __a : Union[str, Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
        with torch.no_grad():
            __a : str = model(_UpperCAmelCase )
        __a : Optional[int] = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
        __a : Optional[Any] = torch.tensor(
            [
                [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
                [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
                [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
            ] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-1 ) )

    @slow
    def _lowerCamelCase ( self ):
        # Post-processing: with explicit target_sizes the map is resized,
        # without them it stays at the model's native 128x128.
        # only resize + normalize
        __a : Dict = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
        __a : Tuple = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
            _UpperCAmelCase )
        __a : List[str] = prepare_img()
        __a : List[str] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
        __a : Union[str, Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
        with torch.no_grad():
            __a : Tuple = model(_UpperCAmelCase )
        __a : int = outputs.logits.detach().cpu()
        __a : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(500, 300)] )
        __a : str = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
        __a : int = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
        __a : Optional[int] = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Configure root logging once at import time: timestamped INFO-level messages.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# NOTE(review): the module logger is bound to ``A`` here while the code below
# references ``logger`` — confirm the intended name against the upstream script.
A = logging.getLogger(__name__)
def __A ( a_ :Union[str, Any] , a_ :Dict) -> Union[str, Any]:
__a : Optional[int] = np.argmax(a_ , axis=1)
return np.sum(outputs == labels)
def __A ( a_ :Any) -> str:
    """Load a RocStories cloze-test CSV and return (story, ending1, ending2, label) tuples.

    The story is columns 1-4 joined with spaces; the two candidate endings are
    columns 5 and 6; the 1-based gold label in the last column becomes 0-based.

    FIX: the scrambled version passed the *path string* to ``csv.reader`` and
    ``next`` instead of the open file handle, so it iterated characters and
    crashed; the reader is now built from the file object.
    """
    with open(a_ , encoding='''utf_8''') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line (header)
        for line in tqdm(reader):
            output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def __A ( a_ :Dict , a_ :str , a_ :str , a_ :List[Any] , a_ :Tuple , a_ :List[Any]) -> Any:
__a : List[str] = []
for dataset in encoded_datasets:
__a : List[str] = len(a_)
__a : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa)
__a : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa)
__a : Tuple = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa)
__a : Optional[Any] = np.zeros((n_batch,) , dtype=np.intaa)
for (
i,
(story, conta, conta, mc_label),
) in enumerate(a_):
__a : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = with_conta
__a : int = with_conta
__a : List[str] = len(a_) - 1
__a : int = len(a_) - 1
__a : Optional[int] = with_conta
__a : Tuple = with_conta
__a : List[Any] = mc_label
__a : Any = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(a_) for t in all_inputs))
return tensor_datasets
def __A ( ) -> Union[str, Any]:
    """Fine-tune OpenAIGPTDoubleHeadsModel on the RocStories cloze task and/or evaluate it.

    FIX(review): the scrambled original bound every local to ``__a`` (including
    tuple unpackings annotated with a type, which PEP 526 makes a SyntaxError)
    and passed the undefined placeholder ``a_`` where concrete values belong.
    Locals now have distinct names and the argparse ``type=``/``default=``/
    ``required=`` values are restored to concrete ones.  The helpers above were
    all scrambled to the name ``__A`` but are still called here by their
    conventional names (``load_rocstories_dataset``, ``pre_process_datasets``,
    ``accuracy``), and the module logger is bound to ``A`` — confirm the
    intended names against the upstream script.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , default='''openai-gpt''' , help='''pretrained model name''')
    parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
    parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
    parser.add_argument('''--train_dataset''' , type=str , default='''''')
    parser.add_argument('''--eval_dataset''' , type=str , default='''''')
    parser.add_argument('''--seed''' , type=int , default=42)
    parser.add_argument('''--num_train_epochs''' , type=int , default=3)
    parser.add_argument('''--train_batch_size''' , type=int , default=8)
    parser.add_argument('''--eval_batch_size''' , type=int , default=16)
    parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=float , help='''Epsilon for Adam optimizer.''')
    parser.add_argument('''--max_grad_norm''' , type=int , default=1)
    parser.add_argument(
        '''--max_steps''' , default=-1 , type=int , help=(
            '''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
        ) , )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
    parser.add_argument('''--learning_rate''' , type=float , default=6.25e-5)
    parser.add_argument('''--warmup_steps''' , default=0 , type=int , help='''Linear warmup over warmup_steps.''')
    parser.add_argument('''--lr_schedule''' , type=str , default='''warmup_linear''')
    parser.add_argument('''--weight_decay''' , type=float , default=0.01)
    parser.add_argument('''--lm_coef''' , type=float , default=0.9)
    parser.add_argument('''--n_valid''' , type=int , default=374)
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''')
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''')
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True)
        ptvsd.wait_for_attach()

    # Seed all RNGs so runs are reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
    n_gpu = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(device , n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['''_start_''', '''_delimiter_''', '''_classify_''']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Recursively tokenize strings, pass ints through, and map over containers."""
        if isinstance(obj , str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj , int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info('''Encoding dataset...''')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]) , len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length , model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        # weight decay is not applied to biases and LayerNorm parameters
        no_decay = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
        optimizer_grouped_parameters = [
            {
                '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                '''weight_decay''': args.weight_decay,
            },
            {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total)

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='''Training''')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels)
                # combine the LM loss (weighted by lm_coef) with the multiple-choice loss
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                # exponential moving average of the loss for the progress bar
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = '''Training loss: {:.2e} lr: {:.2e}'''.format(exp_average_loss , scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , '''module''') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME)

        torch.save(model_to_save.state_dict() , output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='''Evaluating'''):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('''cpu''').numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}

        output_eval_file = os.path.join(args.output_dir , '''eval_results.txt''')
        with open(output_eval_file , '''w''') as writer:
            logger.info('''***** Eval results *****''')
            for key in sorted(result.keys()):
                logger.info(''' %s = %s''' , key , str(result[key]))
                writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
    # NOTE(review): the entry point above is defined as ``__A``; ``main`` is not
    # defined anywhere in this module, so running the script raises NameError —
    # confirm the intended function name.
    main()
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase__ ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''Tokenize a single ``line`` with the given tokenizer, padded/truncated to ``max_length``.

    FIX: the scrambled signature repeated one parameter name six times (a
    SyntaxError) and the body referenced the undefined name ``snake_case__``;
    the conventional (tokenizer, line, max_length, padding_side, ...) parameters
    are restored.

    NOTE(review): the BART prefix-space check presumably targets
    ``BartTokenizer`` (imported at module top) — confirm upstream.
    '''
    # BART tokenizers need add_prefix_space unless the line already starts with one.
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def lowerCamelCase__ ( input_ids , pad_token_id , attention_mask=None , ):
    '''Drop columns of ``input_ids`` (and ``attention_mask``) that are all padding.

    A column is kept if any row has a non-pad token in it.

    FIX: the scrambled signature repeated one parameter name (a SyntaxError) and
    the body referenced names that were never bound.
    '''
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __magic_name__ ( __lowerCAmelCase ):
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case="train" , snake_case=None , snake_case=None , snake_case=None , snake_case="" , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : str =Path(lowerCAmelCase_).joinpath(type_path + '.source')
_UpperCAmelCase : List[str] =Path(lowerCAmelCase_).joinpath(type_path + '.target')
_UpperCAmelCase : Union[str, Any] =self.get_char_lens(self.src_file)
_UpperCAmelCase : List[Any] =max_source_length
_UpperCAmelCase : List[Any] =max_target_length
assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
_UpperCAmelCase : Dict =tokenizer
_UpperCAmelCase : Optional[int] =prefix
if n_obs is not None:
_UpperCAmelCase : Union[str, Any] =self.src_lens[:n_obs]
_UpperCAmelCase : Dict =src_lang
_UpperCAmelCase : List[str] =tgt_lang
def __len__( self) -> Optional[Any]:
'''simple docstring'''
return len(self.src_lens)
def __getitem__( self , snake_case) -> Dict[str, torch.Tensor]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =index + 1 # linecache starts at 1
_UpperCAmelCase : int =self.prefix + linecache.getline(str(self.src_file) , lowerCAmelCase_).rstrip('\n')
_UpperCAmelCase : Union[str, Any] =linecache.getline(str(self.tgt_file) , lowerCAmelCase_).rstrip('\n')
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowerCAmelCase_):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase : str =(
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCAmelCase_) else self.tokenizer
)
_UpperCAmelCase : int =self.tokenizer.generator if isinstance(self.tokenizer , lowerCAmelCase_) else self.tokenizer
_UpperCAmelCase : List[Any] =encode_line(lowerCAmelCase_ , lowerCAmelCase_ , self.max_source_length , 'right')
_UpperCAmelCase : int =encode_line(lowerCAmelCase_ , lowerCAmelCase_ , self.max_target_length , 'right')
_UpperCAmelCase : Any =source_inputs['input_ids'].squeeze()
_UpperCAmelCase : Union[str, Any] =target_inputs['input_ids'].squeeze()
_UpperCAmelCase : Union[str, Any] =source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowerCAmelCase ( snake_case) -> Optional[int]:
'''simple docstring'''
return [len(lowerCAmelCase_) for x in Path(lowerCAmelCase_).open().readlines()]
def lowerCAmelCase ( self , snake_case) -> Dict[str, torch.Tensor]:
'''simple docstring'''
_UpperCAmelCase : str =torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase : Tuple =torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase : Tuple =torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase : Optional[Any] =(
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase_)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase : List[Any] =(
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowerCAmelCase_)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase : List[str] =trim_batch(lowerCAmelCase_ , lowerCAmelCase_)
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] =trim_batch(lowerCAmelCase_ , lowerCAmelCase_ , attention_mask=lowerCAmelCase_)
_UpperCAmelCase : Optional[int] ={
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
# Module-level logger. NOTE(review): bound to the scrambled name ``lowercase``
# while functions below reference ``logger`` — confirm the intended name.
lowercase =getLogger(__name__)
def lowerCamelCase__ ( __lowerCamelCase : List[List] ):
    '''Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    FIX: the body referenced the undefined name ``snake_case__`` instead of the
    parameter.
    '''
    return list(itertools.chain.from_iterable(__lowerCamelCase ) )
def lowerCamelCase__ ( __lowerCamelCase : str ):
    '''Collect git repository metadata and save it as ``git_log.json`` in the given folder.

    FIX: the body referenced the undefined name ``snake_case__`` instead of the
    collected info dict and the folder parameter.

    NOTE(review): ``get_git_info``/``save_json`` are referenced by their
    conventional names although this module defines them as ``lowerCamelCase__``
    — confirm intended names upstream.
    '''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(__lowerCamelCase , 'git_log.json' ) )
def lowerCamelCase__ ( content , path , indent=4 , **json_dump_kwargs ):
    '''Serialize ``content`` as JSON to ``path``.

    FIX: the scrambled signature repeated one parameter name four times (a
    SyntaxError); the conventional (content, path, indent, **kwargs) parameters
    are restored and forwarded to ``json.dump``.
    '''
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def lowerCamelCase__ ( __lowerCamelCase : str ):
    '''Read and parse the JSON file at the given path.

    FIX: the body referenced the undefined name ``snake_case__`` and passed the
    path (not the open file handle) to ``json.load``.
    '''
    with open(__lowerCamelCase ) as f:
        return json.load(f )
def lowerCamelCase__ ( ):
    '''Return a dict describing the enclosing git repo (id, sha, branch) and the host.

    FIX: the body referenced the undefined name ``snake_case__`` both for the
    ``search_parent_directories`` flag (now ``True``) and for the repo object.
    '''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lowerCamelCase__ ( f : Callable , x : Iterable ):
    '''list(map(f, x)) — apply ``f`` to every element of ``x`` and return a list.

    FIX: both parameters were scrambled to the same name (a SyntaxError).
    '''
    return list(map(f , x ) )
def lowerCamelCase__ ( obj , path ):
    '''Pickle ``obj`` to the file at ``path``.

    FIX: both parameters were scrambled to the same name (a SyntaxError) and the
    body pickled to the undefined name ``snake_case__`` instead of the open
    file handle.
    '''
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def lowerCamelCase__ ( __lowerCamelCase : str ):
    '''SQuAD-style answer normalization: lowercase, strip punctuation, drop
    articles (a/an/the) and collapse whitespace.

    FIX: the inner helpers and the final call referenced undefined names
    (``text``/``snake_case__``) instead of their own parameters.
    '''
    def remove_articles(text ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
def lowerCamelCase__ ( prediction , ground_truth ):
    '''Token-level F1 between a predicted answer and a gold answer (SQuAD metric).

    FIX: both parameters were scrambled to the same name (a SyntaxError) and all
    intermediate values were bound to one throwaway local; distinct names are
    restored.  Returns 0 when the answers share no tokens.
    '''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    # multiset intersection of the two token bags
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def lowerCamelCase__ ( prediction , ground_truth ):
    '''True iff the two answers are identical after SQuAD-style normalization.

    FIX: both parameters were scrambled to the same name (a SyntaxError).
    '''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def lowerCamelCase__ ( output_lns , reference_lns ):
    '''Average exact-match score over paired output/reference lines: {"em": fraction}.

    FIX: both parameters were scrambled to the same name (a SyntaxError) and the
    accumulator ``em`` was never initialized under that name.
    '''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def lowerCamelCase__ ( model_prefix ):
    '''True iff the model-type prefix denotes a RAG model.

    FIX: the body referenced ``model_prefix`` while the parameter was scrambled
    to a different name; the parameter is renamed to match.
    '''
    return model_prefix.startswith('rag' )
def lowerCamelCase__ ( extra_params , hparams , config ):
    '''Move each truthy ``extra_params`` attribute from ``hparams`` onto ``config``,
    mapping it through an equivalent config name when needed (T5 models use
    ``dropout_rate`` instead of ``dropout``).  Attributes the config cannot hold
    are logged and dropped.  Returns the mutated (hparams, config) pair.

    FIX: the scrambled signature repeated one parameter name three times (a
    SyntaxError) and the dropout alias was assigned to a throwaway local instead
    of the mapping.
    '''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 717 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __magic_name__ ( lowerCAmelCase ):
    """DistilBERT tokenization tests.

    NOTE(review): the base-class name was scrambled to the undefined
    ``lowerCAmelCase``; given the import above this presumably derives from
    ``BertTokenizationTest`` so the full BERT tokenizer suite is re-run with
    the DistilBERT classes — confirm upstream.
    """

    # NOTE(review): all three class attributes were scrambled to the same
    # identifier; upstream these are tokenizer_class, rust_tokenizer_class and
    # test_rust_tokenizer.
    UpperCAmelCase =DistilBertTokenizer
    UpperCAmelCase =DistilBertTokenizerFast
    UpperCAmelCase =True

    @slow
    def lowerCAmelCase ( self) -> Any:
        '''Check that build_inputs_with_special_tokens frames single sequences as
        [CLS] ... [SEP] and pairs as [CLS] ... [SEP] ... [SEP].'''
        _UpperCAmelCase : Dict =DistilBertTokenizer.from_pretrained('distilbert-base-uncased')

        _UpperCAmelCase : List[Any] =tokenizer.encode('sequence builders' , add_special_tokens=snake_case)
        _UpperCAmelCase : List[Any] =tokenizer.encode('multi-sequence build' , add_special_tokens=snake_case)

        _UpperCAmelCase : Optional[Any] =tokenizer.build_inputs_with_special_tokens(snake_case)
        _UpperCAmelCase : Dict =tokenizer.build_inputs_with_special_tokens(snake_case , snake_case)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
# Raw code cell prepended to generated documentation notebooks: installs
# transformers/datasets (with a commented-out install-from-source variant).
lowerCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

# First notebook cell. NOTE(review): ``INSTALL_CONTENT`` is not defined here —
# the constant above was scrambled to ``lowerCAmelCase``; confirm the intended
# name upstream.
lowerCAmelCase : List[str] = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder -> fake-class substitutions applied when rendering doc code samples.
lowerCAmelCase : Union[str, Any] = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 202 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class __lowercase ( DiffusionPipeline ):
    """Minimal unconditional diffusion pipeline used as a *custom local pipeline*
    test fixture: it runs a plain denoising loop and tags every return value
    with the marker string "This is a local test" so tests can verify that this
    local implementation (not a hub one) was loaded.

    FIX: the base class was the undefined name ``a_`` (now ``DiffusionPipeline``,
    matching the import above) and ``__call__`` repeated the parameter name
    ``A`` six times, which is a SyntaxError.
    """

    def __init__( self , unet , scheduler ) -> List[Any]:
        '''Register the denoising UNet and the noise scheduler as pipeline modules.'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        '''Generate ``batch_size`` images from Gaussian noise.

        Returns the usual pipeline output (ImagePipelineOutput, or a bare tuple
        when ``return_dict`` is False) paired with the local-test marker string.
        '''
        # start from pure noise at the model's native sample size
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample

        # rescale from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image ), "This is a local test"
| 457 | 0 |
import random
from typing import Any
def _UpperCAmelCase ( a : list ):
for _ in range(len(a ) ):
snake_case__ = random.randint(0 , len(a ) - 1 )
snake_case__ = random.randint(0 , len(a ) - 1 )
snake_case__ , snake_case__ = data[b], data[a]
return data
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings.
    # NOTE(review): the names were scrambled — both lists are bound to ``a__``
    # (the second shadowing the first) while the prints reference
    # ``integers``/``strings`` and ``fisher_yates_shuffle`` (defined above as
    # ``_UpperCAmelCase``), so this demo raises NameError as written; confirm
    # intended names upstream.
    a__ = [0, 1, 2, 3, 4, 5, 6, 7]
    a__ = ["""python""", """says""", """hello""", """!"""]
    print("""Fisher-Yates Shuffle:""")
    print("""List""", integers, strings)
    print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 99 |
from __future__ import annotations
import time
# Type alias for a path: a list of (row, col) coordinates.
# NOTE(review): all three constants below were scrambled to ``a__`` (each
# rebinding shadows the previous one), while the classes that follow reference
# ``grid`` and ``delta`` — confirm the intended names upstream.
a__ = list[tuple[int, int]]

# The search grid: 0 = free cell, 1 = obstacle.
a__ = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

# The four allowed moves as (row, col) offsets.
a__ = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node holding a (y, x) position and a link to its parent.

    BUGFIX: all three classes in this file were renamed to the same
    identifier `_lowerCAmelCase` (so only the last survived) and every
    `__init__` declared duplicate parameter names, which is a SyntaxError.
    The names below are the ones the file's own code references.
    """

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # stored (row, col) to match the grid layout
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Unidirectional BFS over the module-level `grid`."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # start/goal come in as (y, x); Node takes (x, y) first.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Run BFS; return the path to the goal, or [start] if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            for node in self.get_successors(current_node):
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbors of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back to the root and return the path start->node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Two simultaneous BFS frontiers, one from each endpoint."""

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # Each frontier aims at the other frontier's most recent node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the forward path and the reversed backward path at the meeting node."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        return fwd_path + bwd_path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    # BUGFIX: every variable below was assigned to the same name `a__` while
    # the code referenced `init`, `goal`, `bfs`, `bd_bfs`, ... (NameError).
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("""Unidirectional BFS computation time : """, bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 99 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    """Offline/legacy-loading behavior of tokenizers.

    BUGFIX: all four methods were renamed to the same `_snake_case` (so they
    shadowed each other and unittest discovered none), the distinct locals
    were collapsed into one name, and `lowercase` was referenced without ever
    being defined. Names below restore a working, self-consistent test class.
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        # Mock response emulating the Hub being down (HTTP 500).
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # Mock response emulating the Hub being down (HTTP 500).
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        # NOTE(review): `GPTaTokenizerFast` matches this file's import block;
        # presumably a mangling of `GPT2TokenizerFast` — confirm upstream.
        _ = GPTaTokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    """Staging tests for pushing tokenizers to the Hub.

    BUGFIX: class/method names were collapsed (`SCREAMING_SNAKE_CASE_`,
    `_snake_case`), locals were collapsed into one name, and `lowercase` was
    used without being defined. Reconstructed to a self-consistent class.
    """

    # Minimal WordPiece vocabulary used to build throwaway tokenizers.
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: repos may not exist if a test failed early.
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    """Unit tests for the tokenization `Trie`.

    BUGFIX: all methods were named `_snake_case` (shadowing each other), and
    the final test assigned the cut result to a collapsed name while
    asserting on an undefined `lowercase`.
    """

    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 158 |
"""Lazy import structure for the Graphormer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# BUGFIX: both the base dict and the torch-only list were assigned to `_A`,
# while `_LazyModule` was called with an undefined `_import_structure`, and
# the lazy module was never installed into sys.modules.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Turn a fine-pruned (movement-pruning) checkpoint into a standalone
    pruned checkpoint by applying the learned masks to the weights.

    BUGFIX: the function was renamed `UpperCamelCase` while the __main__
    block calls `main(args)`; locals were collapsed (`__lowercase`
    everywhere, the pruned state dict never kept per-name entries, and
    `l, r = -0.1, 1.1` lost its two names).
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embedding/norm/pooler weights are never pruned; copy as-is.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip "weight" to reach the paired score tensor
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    # BUGFIX: the parser and parsed args were assigned to `lowerCamelCase__`
    # while the code referenced `parser` and `args` (NameError).
    parser = argparse.ArgumentParser()

    parser.add_argument(
        """--pruning_method""",
        choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
        type=str,
        required=True,
        help=(
            """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
            """ sigmoied_threshold = Soft movement pruning)"""
        ),
    )
    parser.add_argument(
        """--threshold""",
        type=float,
        required=False,
        help=(
            """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
            """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
            """Not needed for `l0`"""
        ),
    )
    parser.add_argument(
        """--model_name_or_path""",
        type=str,
        required=True,
        help="""Folder containing the model that was previously fine-pruned""",
    )
    parser.add_argument(
        """--target_model_path""",
        default=None,
        type=str,
        required=False,
        help="""Folder containing the model that was previously fine-pruned""",
    )

    args = parser.parse_args()

    main(args)
| 704 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Optimize *search_prob* with simulated annealing and return the best state.

    BUGFIX: every parameter was renamed `lowercase_` (duplicate argument
    names are a SyntaxError) and all locals were collapsed into one name;
    the __main__ block calls this as `simulated_annealing`.

    Args:
        search_prob: state exposing `.score()`, `.get_neighbors()`, `.x`, `.y`.
        find_max: maximize the score when True, minimize when False.
        max_x/min_x/max_y/min_y: bounds outside which neighbors are rejected.
        visualization: plot score-vs-iteration via matplotlib when True.
        start_temperate: initial temperature.
        rate_of_decrease: fractional temperature decay per iteration.
        threshold_temp: temperature at which the search stops.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("""Iterations""")
        plt.ylabel("""Function values""")
        plt.show()
    return best_state
if __name__ == "__main__":
    # BUGFIX: both test functions were named `test_fa` with duplicate
    # `lowercase_` parameters (SyntaxError), and every variable was assigned
    # to `lowerCamelCase__` while the code referenced `prob`/`local_min`.

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
        f"""{local_min.score()}"""
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
        f"""{local_min.score()}"""
    )
| 495 | 0 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Return a k_size x k_size Gaussian kernel for the given sigma.

    BUGFIX: both parameters were renamed `lowerCAmelCase` (duplicate
    argument names are a SyntaxError) and the function was renamed while
    `gaussian_filter` still calls `gen_gaussian_kernel`.
    NOTE(review): the normalization uses 1/(2*pi*sigma), not sigma**2 —
    preserved from the original; the kernel is not normalized to sum 1.
    """
    center = k_size // 2
    # Coordinate grids centered on the kernel middle.
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur to a 2-D grayscale *image* (valid convolution).

    BUGFIX: all three parameters were renamed `lowerCAmelCase` (duplicate
    argument names are a SyntaxError); the im2col matrix rows were never
    stored; the dtype argument of `astype` was lost. Output dtype is uint8
    (spelled as a string because the file's numpy import is mangled).
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")

    return dst
if __name__ == "__main__":
    # BUGFIX: every variable was assigned to `SCREAMING_SNAKE_CASE_` and the
    # blurred images were displayed via an undefined `gaussianaxa`.
    # read original image
    img = imread(R'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
    waitKey()
| 300 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# BUGFIX: both module constants were assigned to `SCREAMING_SNAKE_CASE_`,
# while the config class below references `logger` (NameError).
logger = logging.get_logger(__name__)

# NOTE(review): archive-map constant name follows the standard transformers
# convention for this model — confirm against the package's __init__.
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/conditional-detr-resnet-50': (
        'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for Conditional DETR models.

    BUGFIX: the class was named `a` with an undefined base `UpperCAmelCase`
    (the file imports `PretrainedConfig`), all 34 `__init__` parameters were
    renamed `A_` (duplicate argument names are a SyntaxError), and every
    `self.<attr> = <param>` assignment lost its left-hand side. Names are
    reconstructed from the assignment order and attribute references.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild the nested config object from its serialized form.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this config (and the nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR.

    BUGFIX: the class was named `a` (shadowing the config class above) with
    an undefined base `UpperCAmelCase` (the file imports `OnnxConfig`), and
    all members were renamed to collapsed identifiers.
    """

    # Minimum torch version supporting the ops this export needs.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 300 | 1 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase (DiffusionPipeline ):
    """
    Audio diffusion pipeline.

    Denoises a mel-spectrogram image with a UNet (optionally in the latent
    space of a VQ-VAE) and converts the result back to audio with the ``Mel``
    component.

    NOTE(review): the original block was syntactically invalid — every method
    repeated one placeholder parameter name, the base class was the undefined
    name ``_UpperCAmelCase``, and three methods shared the name
    ``__snake_case`` (so only the last survived) while ``__call__`` calls
    ``self.get_default_steps()``.  Identifiers below are restored from the
    upstream diffusers ``AudioDiffusionPipeline`` this code mirrors — confirm
    against upstream.
    """

    # Components that may legitimately be absent (pipelines without a VQ-VAE).
    _optional_components = ["vqvae"]

    def __init__(self, vqvae, unet, mel, scheduler):
        """Register the sub-models so DiffusionPipeline can (de)serialize them."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        """Default number of denoising steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1_000

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,  # upstream API name; intentionally shadows the builtin
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ):
        """
        Generate audio by denoising a (possibly partially masked) spectrogram.

        Returns ``(images, (sample_rate, audios))`` when ``return_dict`` is
        False, otherwise a merged audio/image pipeline output.
        """
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            # Rescale pixel values from [0, 255] to [-1, 1].
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                # Move the conditioning image into the VQ-VAE latent space.
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            # Re-impose the known (masked) region after every denoising step.
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images, steps=50):
        """Reverse the DDIM sampler: map spectrogram images back to noise."""
        # Only the deterministic DDIM sampler can be inverted.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            # Invert one DDIM update step.
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0, x1, alpha):
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
# ---- (removed corrupted dataset-separator artifact) ----
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    """Recursively pretty-print the structure of a (nested) state dict.

    Args:
        name: key of the current entry, or None at the root.
        val: dict (recursed into), torch.Tensor (size printed) or scalar.
        spaces: current indentation level (incremented by 2 per nesting).

    NOTE(review): the original def duplicated the placeholder parameter name
    ``_A`` (a SyntaxError) and was misnamed while its own body recursed into
    ``recursive_print``; the canonical name/parameters are restored.
    """
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV tensor into [num_splits * num_heads * hidden_size, :].

    Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    for compatibility with later versions of NVIDIA Megatron-LM.
    The inverse operation is performed inside Megatron-LM to read checkpoints:
    https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    If param is the weight tensor of the self-attention block, the returned tensor
    will have to be transposed one more time to be read by HuggingFace GPT2.

    For unknown checkpoint versions (< 1.0) the tensor is returned unchanged.
    """
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 state dict into the HF transformers layout.

    Args:
        args: parsed CLI namespace (unused here; kept for signature parity).
        input_state_dict: raw torch-loaded Megatron checkpoint dict.
        config: GPT-2 config object; mutated in place when the checkpoint
            carries its own training args.

    Returns:
        dict mapping HF GPT-2 parameter names to tensors.

    NOTE(review): the original def duplicated the placeholder parameter name
    (a SyntaxError) and all assignment targets were lost to obfuscation;
    names/keys are restored from the upstream conversion script this mirrors.
    """
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings, truncated to vocab_size rows.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings. [max_sequence_length, hidden_size]
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    """Parse CLI arguments, load a Megatron checkpoint and save it in HF format.

    NOTE(review): the original def was misnamed while the module's
    ``__main__`` guard calls ``main()``; name and lost assignment targets are
    restored from the upstream conversion script.  Purely I/O driven.
    """
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename: outputs are written next to the checkpoint.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        # NOTE(review): the boolean/None summary_* defaults below follow the
        # upstream script (obfuscation had erased them) — confirm upstream.
        config = GPTaConfig(
            vocab_size=50_257,
            n_positions=1_024,
            n_embd=1_024,
            n_layer=24,
            n_head=16,
            n_inner=4_096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50_256,
            eos_token_id=50_256,
        )
    else:
        config = GPTaConfig.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
# Script entry point: run the full Megatron -> HF GPT-2 conversion when executed directly.
if __name__ == "__main__":
    main()
####################################################################################################
# ---- (removed corrupted dataset-separator artifact) ----
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __A ( unittest.TestCase ):
    """
    Smoke tests for ``TensorFlowBenchmark``.

    NOTE(review): in the original block every test method shared one
    placeholder name (``A__``, so earlier defs were shadowed), the
    ``MODEL_ID`` locals were never assigned, and the boolean flags passed to
    ``TensorFlowBenchmarkArguments`` were replaced by an undefined
    placeholder.  Method names and flag values below are restored from the
    upstream transformers TF benchmark test suite this mirrors — confirm
    against upstream.
    """

    def check_results_dict_not_empty(self, results):
        """Assert every (batch_size, sequence_length) pair produced a result."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
# ---- (removed corrupted dataset-separator artifact) ----
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
# Module-level logger for this deprecated feature-extractor module.
# NOTE(review): obfuscated name; not referenced anywhere in the visible chunk.
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( MobileViTImageProcessor ):
    """Deprecated alias of :class:`MobileViTImageProcessor`.

    Kept only for backward compatibility; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.

    NOTE(review): the original ``__init__`` reused one placeholder name for
    both ``*args`` and ``**kwargs`` (a SyntaxError), the base class was the
    undefined name ``lowercase``, and the warning category had been replaced
    by a placeholder; all three are restored here.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def snake_case_(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Emit deprecation warnings and optionally pop deprecated kwargs.

    Each positional argument is an ``(attribute, version_name, message)``
    tuple.  ``take_from`` may be a dict of kwargs (the deprecated entry is
    popped and returned) or an object (the attribute's value is returned).

    Raises:
        ValueError: if the running package version already reached
            ``version_name`` (the deprecation tuple should then be deleted).
        TypeError: if ``take_from`` is a dict with leftover unexpected keys.

    NOTE(review): the original def duplicated the placeholder parameter name
    (a SyntaxError) and the warning category had been replaced by a
    placeholder; restored from the upstream ``deprecate`` helper.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        # Point the error at the caller's frame for a useful message.
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
# ---- (removed corrupted dataset-separator artifact) ----
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below
# (the loaders reference the name `logger`).
logger = logging.get_logger(__name__)

# fairseq (wav2vec2-conformer) parameter name -> HF model parameter name.
# A "*" is replaced by the encoder layer index at load time.
# NOTE(review): the originals were all assigned to one placeholder name
# (`A_`, reassigned three times) while the functions below reference
# `logger`, `MAPPING` and `TOP_LEVEL_KEYS`; names restored accordingly.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Converted parameters that live at the top level of the HF model
# (i.e. are NOT prefixed with "wav2vec2_conformer.").
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``key`` (dot-separated) into ``hf_pointer`` and copy ``value`` there.

    Args:
        hf_pointer: root HF module to write into.
        key: dotted attribute path of the target parameter/module.
        value: tensor loaded from the fairseq checkpoint.
        full_name: original fairseq parameter name (used in logs/errors).
        weight_type: which attribute of the target to set ("weight",
            "weight_g", "bias", "running_mean", ...) or None to assign
            the pointed-to object's data directly.

    Raises:
        ValueError: if the checkpoint tensor's shape does not match the model.

    NOTE(review): the original def duplicated the placeholder parameter name
    (a SyntaxError); the name matches the call site later in this file.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Shape check before writing anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every weight of `fairseq_model`'s state dict into `hf_model`.

    Conv feature-extractor weights are delegated to `load_conv_layer`; all other
    weights are translated through the module-level MAPPING table and written
    with `set_recursively`. Names that match nothing are collected and logged.

    Args:
        fairseq_model: source fairseq model.
        hf_model: destination HF Wav2Vec2Conformer-style model.
        is_headless: accepted for call-site compatibility; not used here.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # NOTE(review): attribute is `wavaveca_conformer` but the mapped-key prefix
    # below is "wav2vec2_conformer." — confirm the backbone attribute name.
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Top-level keys (e.g. lm_head) are not nested under the backbone.
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq parameter name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv feature-extractor weight into the HF feature extractor.

    Args:
        full_name: fairseq parameter name containing "conv_layers.<layer>.<type>...".
        value: tensor to copy.
        feature_extractor: HF feature extractor exposing `conv_layers`.
        unused_weights: list collecting names that could not be placed.
        use_group_norm: True when the first conv layer uses group norm.

    Raises:
        ValueError: if the destination shape does not match ``value.shape``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        # type 0 == the convolution itself
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 == layer norm; with group norm it only exists on layer 0
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''')
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Convert a fairseq wav2vec2-conformer checkpoint to the HF design.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the HF model/processor.
        config_path: optional HF config.json to start from.
        dict_path: fairseq dictionary path (fine-tuned models only).
        is_finetuned: whether the checkpoint is a fine-tuned (CTC) model.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # rotary-embedding checkpoints are identified by their file name
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # layer-norm feature extractors need the attention mask at inference
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 606 | 1 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase : Optional[int] = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
lowercase : Dict = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
lowercase : str = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _a(datasets.Metric):
    """Google BLEU (GLEU) metric computed with NLTK's `gleu_score.corpus_gleu`."""

    def _info(self) -> MetricInfo:
        # `datasets.Metric` hook: declares the metric's input features.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        # `datasets.Metric` hook: called with the batched inputs declared in `_info`.
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 116 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (previously both constants shared one name, the map clobbering the logger).
logger = logging.get_logger(__name__)

# Map of canonical checkpoint names to their hosted config files.
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class _a(a__):
    """
    Configuration class for an Autoformer time-series model.

    Holds the time-series-specific settings (prediction/context lengths, lag
    features, static/dynamic feature counts), the encoder/decoder transformer
    hyper-parameters, and the Autoformer-specific decomposition settings.
    """

    model_type = "autoformer"
    # Map generic config names onto this model's parameter names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling=True,
        # NOTE(review): the four zero-default count parameters below follow the
        # upstream AutoformerConfig ordering — confirm against external callers.
        num_time_features=0,
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        activation_function="gelu",
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        is_encoder_decoder=True,
        label_length=10,
        moving_average=25,
        autocorrelation_factor=3,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            self.embedding_dimension = embedding_dimension
        else:
            # Default heuristic: half the cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Size of the per-timestep feature vector appended to the target values.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 116 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """LRU cache of keys backed by a deque (recency order) plus a set (fast membership)."""

    dq_store: deque  # Cache store of keys, most recently used at the left
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache (class-wide)

    def __init__(self, n: int) -> None:
        """Create an empty cache holding at most `n` keys (0 means unbounded).

        Raises:
            ValueError: if `n` is negative.
        """
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to `x`, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Already cached: move it to the front below.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cached keys, most recently used first."""
        for key in self.dq_store:
            print(key)

    def __repr__(self) -> str:
        return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo: with capacity 4, re-referring "A" keeps it resident while 2 is evicted.
    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a: Dict = logging.get_logger(__name__)
class __UpperCamelCase(PreTrainedTokenizer):
    """
    Byte-level (ByT5-style) tokenizer: every UTF-8 byte is a token, plus a small
    set of special tokens (<pad>, </s>, <unk>) and `extra_ids` sentinel tokens.

    NOTE(review): the base class was an undefined name; `PreTrainedTokenizer` is
    the tokenizer base imported at the top of this file — confirm.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add the <extra_id_*> sentinels when the caller did not supply them.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda t: bool("extra_id" in str(t)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            # Place the additional specials at the very end of the vocab.
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask marking special tokens in the (pair of) sequence(s)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        """Append the EOS id unless the sequence already ends with it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """All-zero token type ids (this tokenizer does not use token types)."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Join one or two sequences, ensuring each is EOS-terminated."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        """Tokenize a string into one single-character token per UTF-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id; unknown multi-char tokens map to unk."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            # Byte tokens are offset past the special-token ids.
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Convert an id back to its token string."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Reassemble byte tokens into a string; undecodable bytes are dropped."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                # NOTE(review): looks up special_tokens_decoder here as the
                # original did — confirm this is intentional.
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # Byte-level tokenizer has no vocabulary file; nothing to save.
        return ()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.