def dodecahedron_surface_area(edge: float) -> float:
    """Calculates the surface area of a regular dodecahedron with the given edge length."""
    # Check the type before comparing, so bad input raises ValueError rather than TypeError.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Calculates the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
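# A minimal usage sketch (not in the original file); the printed values follow
# directly from the closed-form surface-area and volume formulas above.
if __name__ == "__main__":
    edge = 2.0
    print(f"surface area: {dodecahedron_surface_area(edge):.3f}")  # ~82.583
    print(f"volume: {dodecahedron_volume(edge):.3f}")  # ~61.305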
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
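# A minimal usage sketch, not part of the original file: instantiating the tool
# and synthesizing speech (downloads the Hub checkpoints on first use; the call
# goes through PipelineTool.__call__, which feeds encode/forward/decode above):
#
#   tool = TextToSpeechTool()
#   speech = tool("Hello, my dog is cute.")  # 1-D waveform tensor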
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
    from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # Sequences are padded to length 102 with pad id 58100; the padded tails
        # and the attention masks are written as list arithmetic instead of
        # literal repetition, which preserves the original values exactly.
        pad = 58100
        expected_encoding = {
            "input_ids": [
                [43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0],
                [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0] + [pad] * 71,
                [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0] + [pad] * 90,
            ],
            "attention_mask": [
                [1] * 102,
                [1] * 31 + [0] * 71,
                [1] * 12 + [0] * 90,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
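# To run just this module, something like the following should work (the test
# path is an assumption based on the usual transformers repository layout):
#
#   python -m pytest tests/models/marian/test_tokenization_marian.py -v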
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Re-create the pre-tokenizer if the serialized state disagrees on add_prefix_space.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation, truncating to the model's maximum length from the left."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
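# A minimal usage sketch, not part of the original file: loading the fast
# tokenizer from the Hub and round-tripping a string (requires network access).
#
#   from transformers import GPTNeoXTokenizerFast
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("Hello world").input_ids
#   assert tok.decode(ids) == "Hello world"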
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding any nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
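# A minimal sketch, not part of the original file: instantiating the config with
# defaults and checking the attribute_map aliases; note that `two_stage=True`
# requires `with_box_refine=True`, as enforced in __init__ above.
#
#   config = DeformableDetrConfig()
#   assert config.hidden_size == config.d_model == 256
#   two_stage_config = DeformableDetrConfig(two_stage=True, with_box_refine=True)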
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
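# A hypothetical invocation of the script above (the script file name and flag
# values are examples only; every flag has a default defined in parse_args):
#
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --shard_size 1000 --split train --output_dir tf-tpu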
def count_inversions_bf(arr):
    """Count inversions by brute force: check every pair, in O(n^2) time."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort-style divide and conquer, in O(n log n) time.

    Returns the sorted array along with the inversion count.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted arrays, counting the inversions that cross the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of the dependency versions, using the exact same syntax used by pip."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
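# Minimal usage sketches, not part of the original module (the requirement
# strings below are illustrative):
#
#   require_version("numpy>=1.17,<2.0")   # range check against the installed package
#   require_version("python>=3.8")        # special-cased check against the interpreter
#   require_version_core("tokenizers")    # presence check with a transformers-specific hint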
from PIL import Image
def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Change the brightness of a PIL Image by the given level."""

    def brightness(c: int) -> float:
        # Shift every channel value by `level`; written this way to mirror the
        # usual point-operation form 128 + level + (c - 128).
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
from scipy.stats import spearmanr
import datasets
_SCREAMING_SNAKE_CASE : Optional[Any] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_SCREAMING_SNAKE_CASE : Optional[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_SCREAMING_SNAKE_CASE : Tuple = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
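# A hypothetical concrete test class wiring up the mixin (the feature extractor
# and its init dict below are illustrative, not from the original file):
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}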
"""Official evaluation script for SQuAD version 2.0."""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
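# A hypothetical invocation (the script and file names are examples only):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json --out-image-dir out_images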
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew and\n      Dorr, Bonnie and\n      Schwartz, Rich and\n      Micciulla, Linnea and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # Transpose: sacrebleu expects one stream per reference set, not per prediction.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors.
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
A__ : Tuple = DonutProcessor(_lowerCAmelCase , _lowerCAmelCase )
A__ : Tuple = processor(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
A__ : List[str] = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
A__ : Union[str, Any] = '''When is the coffee break?'''
A__ : List[str] = task_prompt.replace('''{user_input}''' , _lowerCAmelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
A__ : Union[str, Any] = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
A__ : str = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
A__ : List[Any] = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
A__ : int = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
A__ : Tuple = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
A__ : Any = original_model.decoder.tokenizer(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors='''pt''' )[
'''input_ids'''
]
A__ : List[str] = original_model.encoder.model.patch_embed(_lowerCAmelCase )
A__ , A__ : Optional[int] = model.encoder.embeddings(_lowerCAmelCase )
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
# verify encoder hidden states
A__ : List[str] = original_model.encoder(_lowerCAmelCase )
A__ : Dict = model.encoder(_lowerCAmelCase ).last_hidden_state
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-2 )
# verify decoder hidden states
A__ : Tuple = original_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).logits
A__ : Optional[Any] = model(_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
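def generate_example(model, processor, image, task_prompt):
    """Illustrative inference sketch (not part of the original conversion script): how a
    converted checkpoint could be used for generation. The `generate` arguments follow the
    standard VisionEncoderDecoderModel API; treat the exact settings as assumptions."""
    pixel_values = processor(image, return_tensors="pt").pixel_values
    decoder_input_ids = processor.tokenizer(
        task_prompt, add_special_tokens=False, return_tensors="pt"
    ).input_ids
    generated_ids = model.generate(
        pixel_values,
        decoder_input_ids=decoder_input_ids,
        max_length=model.config.decoder.max_position_embeddings,
        pad_token_id=processor.tokenizer.pad_token_id,
        eos_token_id=processor.tokenizer.eos_token_id,
        use_cache=True,
    )
    return processor.batch_decode(generated_ids)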
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
_SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
F" {self.sequence_state_dim} and {self.sequence_state_dim}." )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
F" {self.pairwise_state_dim} and {self.pairwise_state_dim}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
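# The configuration pattern above -- a parent config that accepts either a nested config
# object or a plain dict, normalizes it on construction, and re-serializes it in
# `to_dict` -- restated as a self-contained sketch (illustrative; these are not the
# classes defined above):
@dataclass
class _ExampleInnerConfig:
    num_blocks: int = 48
@dataclass
class _ExampleOuterConfig:
    trunk: Optional["_ExampleInnerConfig"] = None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = _ExampleInnerConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = _ExampleInnerConfig(**self.trunk)
    def to_dict(self):
        output = asdict(self)
        output["trunk"] = asdict(self.trunk)
        return output
# _ExampleOuterConfig(trunk={"num_blocks": 4}).to_dict() -> {"trunk": {"num_blocks": 4}}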
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Dict=False ) -> Any:
"""simple docstring"""
try:
A__ : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Optional[Any] = default
else:
# KEY is set, convert it to True or False.
try:
A__ : int = strtobool(lowerCamelCase_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_SCREAMING_SNAKE_CASE : Any = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> str:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None ) -> Any:
"""simple docstring"""
if test_case is None:
return partial(lowerCamelCase_ , version=lowerCamelCase_ )
return unittest.skipUnless(is_torch_version('''>=''' , lowerCamelCase_ ) , F"test requires torch version >= {version}" )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(lowerCamelCase_ )
_SCREAMING_SNAKE_CASE : List[str] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(lowerCamelCase_ )
class UpperCamelCase__ ( unittest.TestCase ):
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(__lowerCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
def __snake_case ( self , UpperCamelCase__ ):
A__ : int = mocks if isinstance(__lowerCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
A__ : Tuple = AcceleratorState()
A__ : str = tensor[None].clone().to(state.device )
A__ : Any = gather(lowerCamelCase_ ).cpu()
A__ : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase_ ):
return False
return True
class UpperCamelCase__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = returncode
A__ : List[str] = stdout
A__ : List[Any] = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Any ) -> str:
"""simple docstring"""
while True:
A__ : Union[str, Any] = await stream.readline()
if line:
callback(lowerCamelCase_ )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Dict=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=False , __UpperCamelCase : List[str]=False ) -> str:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(lowerCamelCase_ ) )
A__ : Dict = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : Union[str, Any] = []
A__ : List[str] = []
def tee(__UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : int="" ):
A__ : Optional[int] = line.decode('''utf-8''' ).rstrip()
sink.append(lowerCamelCase_ )
if not quiet:
print(lowerCamelCase_ , lowerCamelCase_ , file=lowerCamelCase_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=lowerCamelCase_ , )
return _RunOutput(await p.wait() , lowerCamelCase_ , lowerCamelCase_ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : int=None , __UpperCamelCase : str=1_80 , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Optional[int]=True ) -> int:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : List[Any] = loop.run_until_complete(
_stream_subprocess(lowerCamelCase_ , env=lowerCamelCase_ , stdin=lowerCamelCase_ , timeout=lowerCamelCase_ , quiet=lowerCamelCase_ , echo=lowerCamelCase_ ) )
A__ : int = ''' '''.join(lowerCamelCase_ )
if result.returncode > 0:
A__ : List[str] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCamelCase__ ( lowerCamelCase__ ):
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : int=False ) -> List[str]:
"""simple docstring"""
try:
A__ : Dict = subprocess.check_output(lowerCamelCase_ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCamelCase_ , '''decode''' ):
A__ : Tuple = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(lowerCamelCase_ )}` failed with the following error:\n\n{e.output.decode()}" ) from e
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {"vocab_file": "spiece.model"}
_SCREAMING_SNAKE_CASE : Tuple = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
_SCREAMING_SNAKE_CASE : str = {
"AI-Sweden/gpt-sw3-126m": 2_0_4_8,
"AI-Sweden/gpt-sw3-350m": 2_0_4_8,
"AI-Sweden/gpt-sw3-1.6b": 2_0_4_8,
"AI-Sweden/gpt-sw3-6.7b": 2_0_4_8,
"AI-Sweden/gpt-sw3-20b": 2_0_4_8,
}
class UpperCamelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
A__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
A__ : Union[str, Any] = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
'''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
''' you are testing the model, this can safely be ignored''' )
A__ : str = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
A__ : int = '''<|endoftext|>''' if eos_token is None else eos_token
A__ : str = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
A__ : str = unk_token if pad_token is None else pad_token
A__ : Dict = eos_token if bos_token is None else bos_token
else:
A__ : Optional[Any] = '''<pad>''' if pad_token is None else pad_token
A__ : Any = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
A__ : List[Any] = do_lower_case
A__ : Tuple = remove_space
A__ : Optional[Any] = keep_accents
A__ : List[str] = vocab_file
A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# Used for whitespace normalization in input texts
# fmt : off
A__ : Optional[Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
A__ : Optional[int] = re.compile(
F"[{''.join(map(_lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )
def __getstate__( self ):
A__ : Optional[Any] = self.__dict__.copy()
A__ : Any = None
return state
def __setstate__( self , UpperCamelCase__ ):
A__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A__ : Dict = {}
A__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __snake_case ( self ):
return len(self.sp_model )
def __snake_case ( self , UpperCamelCase__ ):
A__ : Optional[int] = self.non_printing_characters_re.sub('''''' , _lowercase )
# Normalize whitespaces
A__ : List[Any] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
A__ : Optional[int] = unicodedata.normalize('''NFC''' , _lowercase )
return text
def __snake_case ( self , UpperCamelCase__ , **UpperCamelCase__ ):
A__ : int = self.preprocess_text(_lowercase )
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def __snake_case ( self , UpperCamelCase__ ):
return self.sp_model.PieceToId(_lowercase )
def __snake_case ( self , UpperCamelCase__ ):
return self.sp_model.IdToPiece(_lowercase )
@staticmethod
def __snake_case ( UpperCamelCase__ ):
return out_string
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = []
A__ : Any = ''''''
A__ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowercase ) + token
A__ : Any = True
A__ : Tuple = []
else:
current_sub_tokens.append(_lowercase )
A__ : Union[str, Any] = False
out_string += self.sp_model.decode(_lowercase )
return out_string
def __snake_case ( self ):
A__ : List[Any] = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
if not os.path.isdir(_lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : Tuple = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , '''wb''' ) as fi:
A__ : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = False ):
if isinstance(_lowercase , _lowercase ):
A__ : Union[str, Any] = self.preprocess_text(_lowercase )
A__ : Optional[Any] = self.sp_model.encode(_lowercase )
else:
A__ : Dict = [self.preprocess_text(_lowercase ) for t in text]
A__ : Union[str, Any] = self.sp_model.encode(_lowercase )
if return_tensors is True or return_tensors == "pt":
A__ : Optional[int] = torch.tensor(_lowercase )
return token_ids
def __snake_case ( self , UpperCamelCase__ ):
return self.sp_model.decode(_lowercase )
def __snake_case ( self , UpperCamelCase__ ):
A__ : Optional[int] = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
A__ : Dict = (
F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(_lowercase ) + F"{self.bos_token}Bot:"
)
return self.encode(text=_lowercase )
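def _example_build_chat_prompt(turns, eos_token="<|endoftext|>", bos_token="<s>"):
    """Standalone restatement (illustrative) of the conversation formatting in the last
    method above. `turns` is a list of (is_user, text) pairs; the default special tokens
    match the non-7b values defined in __init__."""
    texts = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in turns]
    return f"{eos_token}{bos_token}" + bos_token.join(texts) + f"{bos_token}Bot:"
# _example_build_chat_prompt([(True, "Hi"), (False, "Hello")])
# -> "<|endoftext|><s>User: Hi<s>Bot: Hello<s>Bot:"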
def solution() -> str:
    """simple docstring"""
    total = 0
    for i in range(1 , 10_01 ):
        total += i**i
    return str(total )[-10:]
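def _example_solution_mod(n: int = 1000, digits: int = 10) -> str:
    """Equivalent sketch (illustrative): keep only the last `digits` digits while summing,
    using three-argument pow for modular exponentiation instead of full big integers.
    zfill preserves the width even if the truncated sum has leading zeros."""
    mod = 10**digits
    total = sum(pow(i, i, mod) for i in range(1, n + 1)) % mod
    return str(total).zfill(digits)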
if __name__ == "__main__":
print(solution())
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A__ : Dict = SwinConfig(image_size=1_92 )
if "base" in model_name:
A__ : Dict = 6
A__ : Optional[Any] = 1_28
A__ : str = (2, 2, 18, 2)
A__ : Dict = (4, 8, 16, 32)
elif "large" in model_name:
A__ : str = 12
A__ : Optional[int] = 1_92
A__ : Any = (2, 2, 18, 2)
A__ : int = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
A__ : Any = window_size
A__ : Any = embed_dim
A__ : Dict = depths
A__ : Union[str, Any] = num_heads
return config
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if "encoder.mask_token" in name:
A__ : int = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
A__ : List[str] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
A__ : Tuple = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
A__ : str = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
A__ : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
A__ : int = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
A__ : List[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
A__ : Optional[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
A__ : int = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
A__ : List[Any] = "layernorm.weight"
if name == "encoder.norm.bias":
A__ : Optional[Any] = "layernorm.bias"
if "decoder" in name:
pass
else:
A__ : str = "swin." + name
return name
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A__ : Optional[int] = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
if "attn_mask" in key:
pass
elif "qkv" in key:
A__ : str = key.split('''.''' )
A__ : int = int(key_split[2] )
A__ : str = int(key_split[4] )
A__ : int = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A__ : Optional[Any] = val[:dim, :]
                A__ : Any = val[dim : dim * 2, :]
                A__ : List[str] = val[-dim:, :]
            else:
                A__ : Tuple = val[:dim]
                A__ : int = val[dim : dim * 2]
                A__ : Any = val[-dim:]
else:
A__ : int = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : Union[str, Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location='''cpu''' )["model"]
A__ : Any = get_swin_config(__SCREAMING_SNAKE_CASE )
A__ : Optional[int] = SwinForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
model.eval()
A__ : Tuple = convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
A__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
A__ : Optional[int] = ViTImageProcessor(size={'''height''': 1_92, '''width''': 1_92} )
A__ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
A__ : Optional[int] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
with torch.no_grad():
A__ : Tuple = model(**__SCREAMING_SNAKE_CASE ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"Pushing model and image processor for {model_name} to hub" )
model.push_to_hub(F"microsoft/{model_name}" )
image_processor.push_to_hub(F"microsoft/{model_name}" )
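def _example_split_qkv(qkv_weight):
    """Weight-splitting sketch (illustrative): the qkv handling in the state-dict
    conversion above assumes a fused projection of shape (3 * dim, dim); the first `dim`
    rows are the query weights, the middle `dim` the keys, and the last `dim` the values."""
    dim = qkv_weight.shape[0] // 3
    return qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]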
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
'''simple docstring'''
@staticmethod
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ):
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
A__ : Optional[int] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = object_detector(examples[0] , threshold=0.0 )
A__ : Union[str, Any] = len(A_ )
self.assertGreater(A_ , 0 )
self.assertEqual(
A_ , [
{
'''score''': ANY(A_ ),
'''label''': ANY(A_ ),
'''box''': {'''xmin''': ANY(A_ ), '''ymin''': ANY(A_ ), '''xmax''': ANY(A_ ), '''ymax''': ANY(A_ )},
}
for i in range(A_ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __snake_case ( self ):
pass
@require_torch
def __snake_case ( self ):
A__ : List[Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
A__ : Any = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
A__ : Any = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'''score''': 0.7_2_3_5, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_2_1_8, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_1_8_4, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_7_4_8, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_6_5_6, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_6_1_4, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_4_5_6, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_4_2, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_4_1_9, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def __snake_case ( self ):
A__ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
A__ : Optional[int] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
A__ : Optional[Any] = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_4_7_4, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_2_0_8, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __snake_case ( self ):
pass
@require_torch
@slow
def __snake_case ( self ):
A__ : Dict = 0.2
A__ : Any = pipeline('''zero-shot-object-detection''' )
A__ : List[str] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=A_ , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_5_3_7, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def __snake_case ( self ):
A__ : Optional[Any] = 2
A__ : List[str] = pipeline('''zero-shot-object-detection''' )
A__ : Union[str, Any] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=A_ , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'''score''': 0.2_8_6_8, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_7_7, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
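# Standalone usage sketch (illustrative) of the API exercised by the tests above, outside
# the unittest harness; running it would download the pipeline's default checkpoint:
#
#     detector = pipeline("zero-shot-object-detection")
#     detections = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote", "couch"],
#         threshold=0.2,
#     )
#     # each detection: {"score": float, "label": str,
#     #                  "box": {"xmin", "ymin", "xmax", "ymax"}}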
from numpy import exp, pi, sqrt
def gaussian(x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
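# The function above is the normal probability density
#   f(x) = 1 / sqrt(2 * pi * sigma**2) * exp(-(x - mu)**2 / (2 * sigma**2)),
# so with the defaults (mu=0, sigma=1) it peaks at x=0 with value 1/sqrt(2*pi) ~= 0.3989.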
if __name__ == "__main__":
import doctest
doctest.testmod()
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args) -> dict:
    """simple docstring"""
    return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main() -> None:
    """simple docstring"""
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main()
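# Flag-parsing sketch (illustrative): `parse_unknown_args` pairs each flag with the value
# that follows it, so a command-line tail such as
#   ["--name", "squad", "--all_configs", "true"]
# becomes {"name": "squad", "all_configs": "true"} and is forwarded to the subcommand
# factory as keyword arguments.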
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
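# Minimal sketch (illustrative; not transformers' actual `_LazyModule` implementation) of
# the lazy-import pattern used above: attribute access triggers the real submodule import
# on first use, so heavy backends are only loaded when actually needed.
import importlib
import types
class _ExampleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                real_module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(real_module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")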
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """simple docstring"""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        '''q''': query,
        '''tbm''': '''isch''',
        '''hl''': '''en''',
        '''ijn''': '''0''',
    }
    html = requests.get('''https://www.google.com/search''', params=params, headers=headers)
    soup = BeautifulSoup(html.text, '''html.parser''')
    matched_images_data = ''''''.join(
        re.findall(R'''AF_initDataCallback\(([^<]+)\);''', str(soup.select('''script'''))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''',
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''',
        '''''',
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''',
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, '''ascii''').decode(
            '''unicode-escape''')
        original_size_img = bytes(original_size_img_not_fixed, '''ascii''').decode(
            '''unicode-escape''')
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                '''User-Agent''',
                '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
                ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
            )
        ]
        urllib.request.install_opener(opener)
        path_name = F"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, F"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
try:
image_count = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
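# Why the scraped URLs above are decoded with "unicode-escape" twice: inside
# the page's inlined JSON, the escape sequences themselves arrive escaped once
# more (e.g. "\\u003d" instead of "="), so the first pass recovers "\u003d"
# and the second recovers the literal character. A self-contained illustration:
doubly_escaped = "https://example.com/img?a\\\\u003d1"  # as found in the raw page source
once = bytes(doubly_escaped, "ascii").decode("unicode-escape")  # -> "...a\u003d1"
twice = bytes(once, "ascii").decode("unicode-escape")  # -> "...a=1"
assert twice == "https://example.com/img?a=1"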
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
# create empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
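# Hedged mini-example of the fallback rule the two `from_pretrained` tests
# above pin down: when a checkpoint ships no full processor, AutoProcessor
# returns the tokenizer or image processor instead (downloads tiny test
# models from the Hub on first run).
from transformers import AutoProcessor

print(type(AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")).__name__)  # BertTokenizerFast
print(type(AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")).__name__)  # ConvNextImageProcessor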
| 55
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase__ ( A_ ):
'''simple docstring'''
_lowerCAmelCase = '''openai/whisper-base'''
_lowerCAmelCase = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
_lowerCAmelCase = '''transcriber'''
_lowerCAmelCase = WhisperProcessor
_lowerCAmelCase = WhisperForConditionalGeneration
_lowerCAmelCase = ['''audio''']
_lowerCAmelCase = ['''text''']
def __snake_case ( self , UpperCamelCase__ ):
return self.pre_processor(UpperCamelCase__ , return_tensors='''pt''' ).input_features
def __snake_case ( self , UpperCamelCase__ ):
return self.model.generate(inputs=UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
return self.pre_processor.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )[0]
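# Standalone sketch of the encode -> forward -> decode flow the tool above
# wraps, fed one second of silence as a stand-in waveform (first run downloads
# the openai/whisper-base weights). This mirrors the tool's pipeline; it is
# not the tool's own code path.
import numpy as np
import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
input_features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
with torch.no_grad():
    predicted_ids = model.generate(inputs=input_features)
print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])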
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
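# Hedged sketch of how a concrete command plugs into an abstract base like the
# one above: register_subcommand wires an argparse subparser, run() executes
# it. The command name and flags here are illustrative assumptions.
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()


class HelloCommand(BaseCLICommand):
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(factory=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello, {self.name}")


parser = ArgumentParser("demo")
HelloCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["hello", "--name", "CLI"])
args.factory(args).run()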
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_ibert'] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
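# Hedged end-to-end sketch of the inference pattern the integration test above
# verifies (first run downloads the hustvl/yolos-small weights; the image path
# is the repo fixture used in prepare_img).
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
results = image_processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.2f} at {box.tolist()}")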
| 55
| 0
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
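# Small self-contained illustration of the channels_last conversion used
# above: the tensor's memory layout (strides) changes while shape and values
# stay put, which is what lets ipex pick faster NHWC conv kernels.
import torch

x = torch.randn(1, 3, 8, 8)
print(x.stride())  # NCHW-contiguous strides, e.g. (192, 64, 8, 1)
x_cl = x.to(memory_format=torch.channels_last)
print(x_cl.stride())  # channel stride becomes 1, e.g. (192, 1, 24, 3)
print(torch.equal(x, x_cl))  # values unchanged -> True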
| 709
|
def fibonacci(n: int) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
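# The same index can be cross-checked without building the sequence: by
# Binet's formula, digits(F(n)) ~ n*log10(phi) - log10(sqrt(5)) + 1, so the
# first n with at least 1000 digits satisfies n*log10(phi) >= 999 + log10(sqrt(5)).
import math

phi = (1 + math.sqrt(5)) / 2
n = math.ceil((999 + math.log10(math.sqrt(5))) / math.log10(phi))
print(n)  # 4782, agreeing with solution(1000)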
| 55
| 0
|
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
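# Hedged standalone usage of the round-trip checked above (downloads the
# processor files on first use): token2json turns the model's XML-like tag
# stream back into nested Python data.
from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
print(processor.token2json("<s_name>John Doe</s_name><s_age>99</s_age>"))  # {'name': 'John Doe', 'age': '99'}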
| 710
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """simple docstring"""
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i: list[int], k: int, i: int, n: int) -> tuple[int, int]:
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits: list[int], k: int, addend: int) -> None:
    """simple docstring"""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
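# Brute-force reference for small indices, useful for sanity-checking the
# memoized jumping logic above (mind the off-by-one between 0- and 1-based
# indexing); the sequence runs 1, 2, 4, 8, 16, 23, ...
def naive_a(n: int) -> int:
    term = 1
    for _ in range(n - 1):
        term += sum(int(digit) for digit in str(term))
    return term


assert naive_a(6) == 23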
| 55
| 0
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__="None" , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ):
A__ : Optional[Any] = parent
A__ : Tuple = batch_size
A__ : Optional[Any] = seq_length
A__ : str = is_training
A__ : Union[str, Any] = use_input_mask
A__ : List[Any] = use_token_type_ids
A__ : str = use_labels
A__ : Optional[Any] = vocab_size
A__ : List[Any] = hidden_size
A__ : List[Any] = num_hidden_layers
A__ : int = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : List[str] = hidden_act
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Union[str, Any] = max_position_embeddings
A__ : Optional[Any] = type_vocab_size
A__ : List[Any] = type_sequence_label_size
A__ : Dict = initializer_range
A__ : Dict = num_labels
A__ : Optional[Any] = num_choices
A__ : List[str] = relative_attention
A__ : int = position_biased_input
A__ : Optional[Any] = pos_att_type
A__ : str = scope
def __snake_case ( self ):
A__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : str = None
if self.use_input_mask:
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ : List[Any] = None
if self.use_token_type_ids:
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Dict = None
A__ : Any = None
A__ : List[Any] = None
if self.use_labels:
A__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __snake_case ( self , UpperCamelCase__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Tuple = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
A__ : str = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
A__ : int = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : str = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : int = self.num_labels
A__ : List[Any] = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[str] = self.num_labels
A__ : Union[str, Any] = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Union[str, Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : int = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
A__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[Any] = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __UpperCAmelCase, __UpperCAmelCase, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : Tuple = DebertaVaModelTester(self )
A__ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def __snake_case ( self ):
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def __snake_case ( self ):
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def __snake_case ( self ):
A__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def __snake_case ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='''Model not available yet''' )
def __snake_case ( self ):
pass
@slow
def __snake_case ( self ):
A__ : List[str] = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
A__ : str = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
A__ : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
A__ : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
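# Hedged usage sketch of the checkpoint exercised by the integration test
# above (first run downloads large weights): encode a sentence and inspect
# the encoder output shape. deberta-v2-xlarge has hidden size 1536.
import torch
from transformers import AutoTokenizer, DebertaV2Model

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
inputs = tokenizer("DeBERTa v2 uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # (1, sequence_length, 1536)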
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key: str, default: bool = False) -> bool:
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    clear_on_setup = True
@classmethod
def __snake_case ( cls ):
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
                    shutil.rmtree(path )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def __snake_case ( self , mocks ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( tensor ) -> bool:
    """simple docstring"""
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device )
    tensors = gather(tensors ).cpu()
    tensor = tensors[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    '''simple docstring'''

    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ) -> None:
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    """simple docstring"""
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )

    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )

    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )

    return result
class SubprocessCallException(Exception ):
    '''simple docstring'''

    pass
def run_command(command , return_stdout=False ):
    """simple docstring"""
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 0
|
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated total cost from start to goal through this cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[Any] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_SCREAMING_SNAKE_CASE : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE : Optional[Any] = [len(grid) - 1, len(grid[0]) - 1]
_SCREAMING_SNAKE_CASE : Any = 1
# the cost map which pushes the path closer to the goal
_SCREAMING_SNAKE_CASE : Optional[int] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_SCREAMING_SNAKE_CASE : Dict = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_SCREAMING_SNAKE_CASE : Union[str, Any] = 9_9
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 712
|
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class UpperCamelCase__ :
    '''simple docstring'''

    def __init__( self ):
        self.SQUARE = np.array(SQUARE )

    def letter_to_numbers( self , letter ):
        index1 , index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes

    def numbers_to_letter( self , index1 , index2 ):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode( self , message ):
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        message = message.replace('''j''' , '''i''' )

        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter

        return encoded_message

    def decode( self , message ):
        message = message.lower()
        message = message.replace(''' ''' , '''''' )

        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter

        return decoded_message
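# Illustrative usage sketch, not part of the original file: the cipher above
# round-trips any j-free, space-free lowercase message.
if __name__ == "__main__":
    cipher = UpperCamelCase__()
    ciphertext = cipher.encode('''testmessage''' )
    assert cipher.decode(ciphertext ) == '''testmessage'''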
| 55
| 0
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCamelCase__ :
    '''simple docstring'''

    def __init__( self , parent , config_class=None , has_text_modality=True , common_properties=None , **kwargs ):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        common_properties = (
            ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(['''vocab_size'''] )

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config , prop ) , msg=F"`{prop}` does not exist" )

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties ):
            try:
                setattr(config , name , idx )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"`{name} value {idx} expected, but was {getattr(config , name )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties ):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config , name ) , idx , msg=F"`{name} value {idx} expected, but was {getattr(config , name )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string( self ):
        config = self.config_class(**self.inputs_dict )
        obj = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , value )

    def create_and_test_config_to_json_file( self ):
        config_first = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''config.json''' )
            config_first.to_json_file(json_file_path )
            config_second = self.config_class.from_json_file(json_file_path )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_from_and_save_pretrained( self ):
        config_first = self.config_class(**self.inputs_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname )
            config_second = self.config_class.from_pretrained(tmpdirname )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_from_and_save_pretrained_subfolder( self ):
        config_first = self.config_class(**self.inputs_dict )
        subfolder = '''test'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname , subfolder )
            config_first.save_pretrained(sub_configs_path )
            config_second = self.config_class.from_pretrained(tmpdirname , subfolder=subfolder )

        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def create_and_test_config_with_num_labels( self ):
        config = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.id2label ) , 5 )
        self.parent.assertEqual(len(config.label2id ) , 5 )

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label ) , 3 )
        self.parent.assertEqual(len(config.label2id ) , 3 )

    def check_config_can_be_init_without_params( self ):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config )

    def check_config_arguments_init( self ):
        kwargs = copy.deepcopy(config_common_kwargs )
        config = self.config_class(**kwargs )
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.float16) )
            elif getattr(config , key ) != value:
                wrong_values.append((key, getattr(config , key ), value) )

        if len(wrong_values ) > 0:
            errors = '''\n'''.join([F"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(F"The following keys were not properly set in the config:\n{errors}" )

    def run_common_tests( self ):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
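# Illustrative sketch, not part of the original file: upstream, this tester is
# driven from a model's unittest case, roughly as below; `MyConfig` is a
# placeholder for a real PretrainedConfig subclass.
#
#     self.config_tester = <the class above>(self, config_class=MyConfig, hidden_size=37)
#     self.config_tester.run_common_tests()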
| 713
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 0
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(
    source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
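# Illustrative usage sketch, not part of the original file: weight 0 rewards
# low values, weight 1 rewards high values, and the summed score is appended
# to each row.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]
    print(procentual_proximity(vehicles, [0, 0, 1]))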
| 714
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    """simple docstring"""
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )

    metric = evaluate.load('''glue''' , '''mrpc''' )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather((predictions, batch['''labels''']) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
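# Illustrative sketch, not part of the original file: the manual last-batch
# truncation in the eval loop above can be replaced with
# `Accelerator.gather_for_metrics`, which drops the duplicated samples that
# distributed samplers add to the final batch of a prepared dataloader.
def gather_for_eval_example(accelerator , predictions , labels ):
    # Gathers across processes and trims padding samples so metrics see each
    # evaluation sample exactly once.
    return accelerator.gather_for_metrics((predictions, labels) )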
| 55
| 0
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class UpperCamelCase__ ( RagRetriever ):
'''simple docstring'''
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None

    def init_retrieval( self , distributed_port ):
        logger.info('''initializing retrieval''' )

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''' )
            # needs to be set manually
            os.environ['''GLOO_SOCKET_IFNAME'''] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ['''MASTER_PORT'''] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main( self ):
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ):
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''' )) , None )
        return ifname
    def retrieve( self , question_hidden_states , n_docs ):
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )

        # distributed training
        world_size = dist.get_world_size(group=self.process_group )

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids, vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
| 715
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase__ ( PipelineTool ):
    '''simple docstring'''

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()

    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors='''pt''' , truncation=True )

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )

            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )

    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
| 55
| 0
|
from __future__ import annotations
def print_distance(distance: list[float] , src: int ) -> None:
    """simple docstring"""
    print(F"Vertex\tShortest Distance from vertex {src}" )
    for i, d in enumerate(distance ):
        print(F"{i}\t\t{d}" )


def check_negative_cycle(graph: list[dict[str, int]] , distance: list[float] , edge_count: int ) -> bool:
    """simple docstring"""
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]] , vertex_count: int , edge_count: int , src: int ) -> list[float]:
    """simple docstring"""
    distance = [float('''inf''' )] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''' )

    return distance
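# Illustrative check, not part of the original file: the direct 0->2 edge
# (weight 10) loses to the relaxed path 0 -> 1 -> 2 (weight 5).
def _bellman_ford_example() -> None:
    example_graph = [
        {"src": 0, "dst": 1, "weight": 2},
        {"src": 1, "dst": 2, "weight": 3},
        {"src": 0, "dst": 2, "weight": 10},
    ]
    assert bellman_ford(example_graph , 3 , 3 , 0 ) == [0.0, 2.0, 5.0]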
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE : Optional[Any] = int(input('Enter number of vertices: ').strip())
_SCREAMING_SNAKE_CASE : List[str] = int(input('Enter number of edges: ').strip())
_SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
_SCREAMING_SNAKE_CASE : int = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
_SCREAMING_SNAKE_CASE : Dict = {"""src""": src, """dst""": dest, """weight""": weight}
_SCREAMING_SNAKE_CASE : Optional[int] = int(input('\nEnter shortest path source:').strip())
_SCREAMING_SNAKE_CASE : Any = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 716
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )

        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )

        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
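# Illustrative usage sketch, not part of the original file; pulling the real
# checkpoint requires network access.
#
#     from transformers import GPTNeoXTokenizerFast
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     print(tok("hello world").input_ids)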
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 717
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( PretrainedConfig ):
    '''simple docstring'''

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1024 , encoder_layers=6 , encoder_ffn_dim=1024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) -> int:
        return self.d_model

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
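# Illustrative usage sketch, not part of the original file: thanks to
# `attribute_map`, `hidden_size` transparently aliases `d_model`.
#
#     config = <the class above>(d_model=256, two_stage=False)
#     assert config.hidden_size == 256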
| 55
| 0
|
def catalan_numbers(upper_limit: int ) -> "list[int]":
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
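# Illustrative check, not part of the original file: the first Catalan
# numbers are 1, 1, 2, 5, 14, 42.
assert catalan_numbers(5 ) == [1, 1, 2, 5, 14, 42]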
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 718
|
def count_inversions_bf(arr ):
    """simple docstring"""
    num_inversions = 0
    n = len(arr )

    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr ):
    """simple docstring"""
    if len(arr ) <= 1:
        return arr, 0
    mid = len(arr ) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a , inversion_p = count_inversions_recursive(p )
    b , inversions_q = count_inversions_recursive(q )
    c , cross_inversions = _count_cross_inversions(a , b )

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p , q ):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1

    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )

    return r, num_inversion


def main():
    """simple docstring"""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )

    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , num_inversions_bf )

    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )

    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf )

    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a )
    _ , num_inversions_recursive = count_inversions_recursive(arr_a )

    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf )
if __name__ == "__main__":
main()
| 55
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __snake_case ( self ):
A__ : int = 1
A__ : List[Any] = 3
A__ : Dict = (32, 32)
A__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase__ )
return image
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=UpperCamelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
return CLIPTextModel(UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A__ : Optional[int] = self.dummy_cond_unet_upscale
A__ : Any = DDPMScheduler()
A__ : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
A__ : Optional[int] = self.dummy_vae
A__ : Any = self.dummy_text_encoder
A__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A__ : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A__ : Union[str, Any] = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A__ : Dict = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
A__ : List[Any] = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Dict = '''A painting of a squirrel eating a burger'''
A__ : int = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A__ : Optional[int] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
A__ : str = output.images
A__ : Optional[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A__ : Optional[int] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=UpperCamelCase__ , )[0]
A__ : Union[str, Any] = image[0, -3:, -3:, -1]
A__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
A__ : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
A__ : int = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __snake_case ( self ):
A__ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.dummy_cond_unet_upscale
A__ : Union[str, Any] = DDPMScheduler()
A__ : Optional[int] = DDIMScheduler(prediction_type='''v_prediction''' )
A__ : List[str] = self.dummy_vae
A__ : Any = self.dummy_text_encoder
A__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A__ : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A__ : Any = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A__ : List[str] = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
A__ : int = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : str = '''A painting of a squirrel eating a burger'''
A__ : Union[str, Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
A__ : List[str] = output.images
assert image.shape[0] == 2
A__ : int = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A__ : str = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
A__ : int = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __snake_case ( self ):
A__ : List[str] = self.dummy_cond_unet_upscale
A__ : Union[str, Any] = DDPMScheduler()
A__ : Tuple = DDIMScheduler(prediction_type='''v_prediction''' )
A__ : Optional[Any] = self.dummy_vae
A__ : Optional[int] = self.dummy_text_encoder
A__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A__ : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        A__ : Optional[int] = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
A__ : int = unet.half()
A__ : str = text_encoder.half()
# make sure here that pndm scheduler skips prk
A__ : str = StableDiffusionUpscalePipeline(
unet=UpperCamelCase__ , low_res_scheduler=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , max_noise_level=350 , )
A__ : Union[str, Any] = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Any = '''A painting of a squirrel eating a burger'''
A__ : Union[str, Any] = torch.manual_seed(0 )
A__ : List[Any] = sd_pipe(
[prompt] , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , ).images
A__ : List[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
A__ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
A__ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
A__ : Union[str, Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
A__ : Any = StableDiffusionUpscalePipeline.from_pretrained(UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ : List[str] = '''a cat sitting on a park bench'''
A__ : str = torch.manual_seed(0 )
A__ : Optional[int] = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''np''' , )
A__ : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def __snake_case ( self ):
A__ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
A__ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
A__ : Tuple = '''stabilityai/stable-diffusion-x4-upscaler'''
A__ : str = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCamelCase__ , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A__ : List[Any] = '''a cat sitting on a park bench'''
A__ : int = torch.manual_seed(0 )
A__ : Dict = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''np''' , )
A__ : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
A__ : Optional[int] = '''stabilityai/stable-diffusion-x4-upscaler'''
A__ : Dict = StableDiffusionUpscalePipeline.from_pretrained(
            UpperCamelCase__ , torch_dtype=torch.float16 , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ : List[str] = '''a cat sitting on a park bench'''
A__ : Optional[int] = torch.manual_seed(0 )
A__ : Optional[Any] = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , output_type='''np''' , )
A__ : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 719
|
from PIL import Image
def change_brightness(img: Image , level: float ) -> Image:
    """simple docstring"""

    def brightness(c: int ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )

    return img.point(brightness )
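# Illustrative check, not part of the original file: brightness shifts every
# channel by `level`, e.g. a gray value of 50 with level=100 becomes
# 128 + 100 + (50 - 128) = 150.
def _brightness_example() -> None:
    gray = Image.new("L", (2, 2), color=50)
    assert change_brightness(gray, 100).getpixel((0, 0)) == 150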
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 55
| 0
|
from PIL import Image
def change_contrast(img: Image , level: int ) -> Image:
    """simple docstring"""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int ) -> int:
        return int(128 + factor * (c - 128) )

    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 1_7_0)
cont_img.save('image_data/lena_high_contrast.png', format='png')
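# Editor's worked example of the factor formula above for level = 170:
#   factor = (259 * (170 + 255)) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
# so pixel values are stretched away from the midpoint 128 by roughly a factor of 4.85.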
| 720
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 55
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
A__ : int = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
A__ : str = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
A__ : Union[str, Any] = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
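# Editor's usage sketch (hypothetical values, checked by hand; ignoring the obfuscated
# parameter names above): the function zero-pads both binary strings to a common
# length and keeps a '1' only where both inputs have one.
#   SCREAMING_SNAKE_CASE(25, 32)  # -> '0b000000'  (0b011001 & 0b100000)
#   SCREAMING_SNAKE_CASE(37, 50)  # -> '0b100000'  (0b100101 & 0b110010)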
| 721
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Tuple = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
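# Editor's note (sketch, not part of the original file): _LazyModule defers the heavy
# torch-backed imports until an attribute is first accessed, while the TYPE_CHECKING
# branch gives static checkers the real symbols. Conceptually:
#   import transformers.models.swinv2 as m  # cheap: nothing heavy is imported yet
#   m.Swinv2Model                           # first access triggers the actual import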
| 700
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
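# Editor's note (assuming the obfuscated defaults above map to sequence_state_dim=1024
# and sequence_head_width=32, as in the upstream ESMFold trunk config): the validation
# method above enforces state_dim == num_heads * head_width exactly, so these defaults
# imply 1024 // 32 = 32 sequence attention heads.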
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 55
| 0
|
from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list[int] , __UpperCamelCase : int , __UpperCamelCase : int = 0 , __UpperCamelCase : int = -1 ) -> int:
"""simple docstring"""
if hi < 0:
A__ : Union[str, Any] = len(__UpperCamelCase )
while lo < hi:
A__ : str = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
A__ : List[Any] = mid + 1
else:
A__ : Optional[int] = mid
return lo
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list[int] , __UpperCamelCase : int , __UpperCamelCase : int = 0 , __UpperCamelCase : int = -1 ) -> int:
"""simple docstring"""
if hi < 0:
A__ : Dict = len(__UpperCamelCase )
while lo < hi:
A__ : int = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
A__ : Dict = mid + 1
else:
A__ : int = mid
return lo
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list[int] , __UpperCamelCase : int , __UpperCamelCase : int = 0 , __UpperCamelCase : int = -1 ) -> None:
"""simple docstring"""
sorted_collection.insert(bisect_left(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list[int] , __UpperCamelCase : int , __UpperCamelCase : int = 0 , __UpperCamelCase : int = -1 ) -> None:
"""simple docstring"""
sorted_collection.insert(bisect_right(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
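# Editor's sketch of how the two insertion points above differ on duplicates (assuming
# the obfuscated names correspond to bisect_left / bisect_right, consistent with how
# the __main__ block below calls binary_search):
#   data = [1, 2, 4, 4, 4, 6]
#   bisect_left(data, 4)   # -> 2, the index of the first 4
#   bisect_right(data, 4)  # -> 5, one past the last 4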
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list[int] , __UpperCamelCase : int ) -> int | None:
"""simple docstring"""
A__ : List[Any] = 0
A__ : Union[str, Any] = len(__UpperCamelCase ) - 1
while left <= right:
A__ : Dict = left + (right - left) // 2
A__ : Optional[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
A__ : Optional[Any] = midpoint - 1
else:
A__ : str = midpoint + 1
return None
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list[int] , __UpperCamelCase : int ) -> int | None:
"""simple docstring"""
A__ : Dict = bisect.bisect_left(__UpperCamelCase , __UpperCamelCase )
if index != len(__UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : list[int] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) -> int | None:
"""simple docstring"""
if right < left:
return None
A__ : Tuple = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCamelCase , __UpperCamelCase , midpoint + 1 , __UpperCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = input('Enter numbers separated by comma:\n').strip()
_SCREAMING_SNAKE_CASE : Union[str, Any] = sorted(int(item) for item in user_input.split(','))
_SCREAMING_SNAKE_CASE : List[Any] = int(input('Enter a single number to be found in the list:\n'))
_SCREAMING_SNAKE_CASE : Tuple = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 701
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
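# Editor's sketch of the packing scheme implemented by __iter__ above, on toy token
# ids with seq_length = 4 and a concat token id of 0: every tokenized text is
# terminated with the concat token, the results are flattened, and only full windows
# are yielded.
#   tokenized = [[5, 6], [7, 8, 9]]
#   flat      = [5, 6, 0, 7, 8, 9, 0]
#   windows   = [flat[i : i + 4] for i in range(0, len(flat), 4)]  # [[5, 6, 0, 7], [8, 9, 0]]
#   yielded   = [w for w in windows if len(w) == 4]                # [[5, 6, 0, 7]]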
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
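# Editor's note: perplexity is derived above as exp(mean token loss), with per-step
# losses gathered across processes first; e.g. a mean loss of 2.0 corresponds to a
# perplexity of roughly 7.39:
#   torch.exp(torch.tensor(2.0))  # -> tensor(7.3891)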
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 0
|
import csv
import tweepy
# Twitter API credentials
_SCREAMING_SNAKE_CASE : Tuple = ''
_SCREAMING_SNAKE_CASE : Any = ''
_SCREAMING_SNAKE_CASE : Tuple = ''
_SCREAMING_SNAKE_CASE : Dict = ''
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> None:
"""simple docstring"""
A__ : Union[str, Any] = tweepy.OAuthHandler(__UpperCamelCase , __UpperCamelCase )
auth.set_access_token(__UpperCamelCase , __UpperCamelCase )
A__ : Tuple = tweepy.API(__UpperCamelCase )
# initialize a list to hold all the tweepy Tweets
A__ : List[Any] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
A__ : List[str] = api.user_timeline(screen_name=__UpperCamelCase , count=2_00 )
# save most recent tweets
alltweets.extend(__UpperCamelCase )
    # save the id of the oldest tweet, minus one
A__ : Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__UpperCamelCase ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
A__ : Union[str, Any] = api.user_timeline(
screen_name=__UpperCamelCase , count=2_00 , max_id=__UpperCamelCase )
# save most recent tweets
alltweets.extend(__UpperCamelCase )
        # update the id of the oldest tweet, minus one
A__ : Optional[int] = alltweets[-1].id - 1
print(F"...{len(__UpperCamelCase )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
A__ : Any = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , '''w''' ) as f:
A__ : Optional[Any] = csv.writer(__UpperCamelCase )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__UpperCamelCase )
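# Editor's sketch of the max_id pagination contract assumed above (generic form, not
# runnable as-is): each page is requested with max_id one below the oldest tweet
# already seen, so pages never overlap and the loop ends on an empty page.
#   while page:
#       oldest = page[-1].id - 1
#       page = fetch(max_id=oldest)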
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 702
|
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Optional[Any] = 0
for i in range(1 , 10_01 ):
total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
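# Editor's note: only the last ten digits matter, so the same answer can be computed
# without huge intermediate integers via three-argument pow (a sketch, equivalent here
# since the result has a nonzero leading digit):
#   sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10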
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Any = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 55
| 0
|
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=16 , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=14 , UpperCamelCase__=10 , UpperCamelCase__=19 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=True , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=[1, 2, 3, 4, 5] , UpperCamelCase__=25 , UpperCamelCase__=5 , ):
A__ : Optional[int] = d_model
A__ : Dict = parent
A__ : List[Any] = batch_size
A__ : Dict = prediction_length
A__ : List[Any] = context_length
A__ : Union[str, Any] = cardinality
A__ : int = num_time_features
A__ : Optional[Any] = lags_sequence
A__ : Optional[Any] = embedding_dimension
A__ : Any = is_training
A__ : Dict = hidden_size
A__ : List[Any] = num_hidden_layers
A__ : List[str] = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Optional[Any] = hidden_act
A__ : Optional[int] = hidden_dropout_prob
A__ : Any = attention_probs_dropout_prob
A__ : List[str] = context_length
A__ : Any = prediction_length + label_length
A__ : List[str] = label_length
A__ : str = moving_average
A__ : List[str] = autocorrelation_factor
def __snake_case ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __snake_case ( self , UpperCamelCase__ ):
A__ : int = config.context_length + max(config.lags_sequence )
A__ : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
A__ : List[str] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
A__ : int = floats_tensor([self.batch_size, _past_length] )
A__ : Optional[int] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
A__ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
A__ : Optional[int] = floats_tensor([self.batch_size, config.prediction_length] )
A__ : str = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def __snake_case ( self ):
A__ : Tuple = self.get_config()
A__ : Tuple = self.prepare_autoformer_inputs_dict(UpperCamelCase__ )
return config, inputs_dict
def __snake_case ( self ):
A__ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
A__ : List[str] = model(**UpperCamelCase__ )
A__ : Any = outputs.encoder_last_hidden_state
A__ : str = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Tuple = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
A__ : str = model.create_network_inputs(**UpperCamelCase__ )
A__ : Dict = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
A__ : Tuple = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
A__ : List[Any] = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
A__ : Dict = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
A__ : Optional[Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
A__ : Tuple = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
A__ : str = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : str = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
A__ : str = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
A__ : Any = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_lowerCAmelCase = (AutoformerForPrediction,) if is_torch_available() else ()
_lowerCAmelCase = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : List[str] = AutoformerModelTester(self )
A__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A__ : int = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
A__ : int = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info['''missing_keys'''] , [] )
def __snake_case ( self ):
A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
A__ : Optional[Any] = inspect.signature(getattr(UpperCamelCase__ , '''forward''' ) )
# The main input is the name of the argument after `self`
A__ : Tuple = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : str = model_class(UpperCamelCase__ )
A__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : List[str] = [*signature.parameters.keys()]
A__ : int = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Dict = True
A__ : int = getattr(self.model_tester , '''seq_length''' , UpperCamelCase__ )
A__ : Union[str, Any] = getattr(self.model_tester , '''decoder_seq_length''' , UpperCamelCase__ )
A__ : Tuple = getattr(self.model_tester , '''encoder_seq_length''' , UpperCamelCase__ )
A__ : Dict = getattr(self.model_tester , '''d_model''' , UpperCamelCase__ )
A__ : Dict = getattr(self.model_tester , '''num_attention_heads''' , UpperCamelCase__ )
A__ : Union[str, Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
A__ : List[Any] = True
A__ : Optional[int] = False
A__ : Union[str, Any] = True
A__ : Any = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Dict = True
A__ : Tuple = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Any = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
A__ : Union[str, Any] = len(UpperCamelCase__ )
A__ : List[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
A__ : str = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
A__ : Optional[Any] = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
A__ : Optional[int] = True
A__ : Optional[int] = True
A__ : Tuple = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
A__ : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __snake_case ( self ):
super().test_retain_grad_hidden_states_attentions()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any]="train-batch.pt" ) -> str:
"""simple docstring"""
A__ : Dict = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__UpperCamelCase , repo_type='''dataset''' )
A__ : Tuple = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )
return batch
@require_torch
@slow
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
A__ : int = prepare_batch()
with torch.no_grad():
A__ : Any = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
A__ : Any = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
A__ : Any = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def __snake_case ( self ):
A__ : Tuple = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
A__ : Optional[Any] = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
A__ : int = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
A__ : Tuple = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
A__ : Dict = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def __snake_case ( self ):
A__ : int = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(UpperCamelCase__ )
A__ : int = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
A__ : Optional[int] = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
A__ : Tuple = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
A__ : str = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=UpperCamelCase__ )
A__ : List[str] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
| 704
|
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : float = 0.0 , __UpperCamelCase : float = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
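# Editor's quick check (conceptual; the parameter names above are obfuscated): with
# mu = 0.0 and sigma = 1.0 this is the standard normal density, so the value at x = 0
# should be 1 / sqrt(2 * pi):
#   SCREAMING_SNAKE_CASE(0)  # -> 0.3989422804014327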
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
| 0
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCamelCase__ :
'''simple docstring'''
@staticmethod
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ):
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_SCREAMING_SNAKE_CASE : Dict = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ = pipeline(
'''document-question-answering''' , model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
A__ = INVOICE_URL
A__ = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , '''''' ) ) )
A__ = '''What is the placebo?'''
A__ = [
{
'''image''': load_image(UpperCamelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ = dqa_pipeline(UpperCamelCase__ , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
[
{'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ ), '''start''': ANY(UpperCamelCase__ ), '''end''': ANY(UpperCamelCase__ )},
{'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ ), '''start''': ANY(UpperCamelCase__ ), '''end''': ANY(UpperCamelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __snake_case ( self ):
A__ = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
A__ = INVOICE_URL
A__ = '''How many cats are there?'''
A__ = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
A__ = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
A__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
        # No text is detected in this image, so LayoutLMv2 should return an empty answer.
A__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
A__ = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
A__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
A__ = []
A__ = []
A__ = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , words=UpperCamelCase__ , boxes=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __snake_case ( self ):
A__ = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
A__ = INVOICE_URL
A__ = '''What is the invoice number?'''
A__ = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
A__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
A__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __snake_case ( self ):
A__ = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
A__ = INVOICE_URL
A__ = '''What is the invoice number?'''
A__ = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
A__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
A__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __snake_case ( self ):
A__ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCamelCase__ )
A__ = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCamelCase__ , revision='''3dc6de3''' , )
A__ = INVOICE_URL
A__ = '''What is the invoice number?'''
A__ = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
A__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
A__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
A__ = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
A__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_tf(self):
        pass
| 705
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
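# Illustrative note (not part of the original module): with the lazy import
# structure above, heavy submodules are only imported on first attribute
# access. Assuming this file is `transformers/models/bert/__init__.py`:
#
#     from transformers.models.bert import BertConfig  # cheap: config module only
#     from transformers.models.bert import BertModel   # now triggers the modeling_bert import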
| 55
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "camembert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
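# Usage sketch (hypothetical, not in the original file): the dynamic-axis
# mapping above tells the ONNX exporter which dimensions may vary at runtime:
#
#     onnx_config = CamembertOnnxConfig(CamembertConfig(), task="sequence-classification")
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])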
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    """simple docstring"""

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_config(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_processor_from_model_config_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)

            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    """simple docstring"""

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 55
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
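# Layout of the special tokens produced by the two methods above (RoBERTa-style):
#   single sequence:    <s> A </s>
#   pair of sequences:  <s> A </s> </s> B </s>
# Token type ids are all zeros in both cases, since RoBERTa does not use them.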
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """simple docstring"""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
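# Minimal sketch of a concrete subclass (hypothetical command, not part of the
# original file), showing how the two abstract hooks are meant to be used:
#
# class HelloCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         sub = parser.add_parser("hello")
#         sub.set_defaults(func=lambda args: HelloCommand())
#
#     def run(self):
#         print("hello from the CLI")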
| 55
| 0
|
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    # Returns +/-1 from the parity of the prime-factor count (with multiplicity);
    # a full Moebius function would additionally return 0 for non-square-free inputs.
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
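# Worked examples (assuming `prime_factors` returns factors with multiplicity):
#   mobius(5)  -> -1   # [5], odd count
#   mobius(6)  ->  1   # [2, 3], even count
#   mobius(4)  ->  1   # [2, 2]; a full Moebius function would return 0 here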
| 708
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    """simple docstring"""

    def __init__(
        self, parent, batch_size=13, image_size=[30, 30], patch_size=2, num_channels=3, is_training=True,
        use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
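        # With the defaults above: (30 // 2) * (30 // 2) = 225 patches, so
        # expected_seq_len = 225 + 1 ([CLS] token) + 10 (detection tokens) = 236.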
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return YolosConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )
    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> Image.Image:
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 55
| 0
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """simple docstring"""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
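# Usage sketch: the factory above returns a decorator that selects eager vs.
# XLA-compiled graph execution for a benchmarked forward function, e.g.
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids)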
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
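# Example: random_input_ids(2, 8, 30522) returns a (2, 8) int32 tensor of
# token ids drawn uniformly from [0, 30522).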
class TensorFlowBenchmark(Benchmark):
    """simple docstring"""

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 709
|
def fibonacci(n: int) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """simple docstring"""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
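# Worked example: solution(3) == 12, since F(12) = 144 is the first Fibonacci
# number with 3 digits (with the indexing above, fibonacci(12) == 144).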
| 55
| 0
|
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
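# Example: with the default scale_factor=8 the output is the latent-grid size
# times the scale factor, rounded up so the grid covers the whole image:
#   downscale_height_and_width(768, 768) -> (96, 96)   # 768 // 8**2 = 12; 12 * 8 = 96
#   downscale_height_and_width(500, 500) -> (64, 64)   # ceil(500 / 64) = 8; 8 * 8 = 64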
def prepare_image(pil_image, w=512, h=512):
    """simple docstring"""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
super().__init__()
self.register_modules(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , )
A__ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# get the original timestep using init_timestep
A__ : Tuple = min(int(num_inference_steps * strength ) , UpperCamelCase__ )
A__ : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
A__ : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
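    # For example, with num_inference_steps=100 and strength=0.3 only the final
    # 30 scheduler timesteps are run, keeping the result close to the init image.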
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
if not isinstance(UpperCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase__ )}" )
A__ : List[str] = image.to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
A__ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
A__ : int = image
else:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(UpperCamelCase__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase__ )
]
A__ : int = torch.cat(UpperCamelCase__ , dim=0 )
else:
A__ : str = self.movq.encode(UpperCamelCase__ ).latent_dist.sample(UpperCamelCase__ )
A__ : int = self.movq.config.scaling_factor * init_latents
A__ : List[Any] = torch.cat([init_latents] , dim=0 )
A__ : str = init_latents.shape
A__ : str = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
# get latents
A__ : str = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ : str = init_latents
return latents
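    # The init latents above are only noised up to the first retained timestep,
    # so a lower `strength` preserves more of the source image.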
def __snake_case ( self , UpperCamelCase__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
A__ : Dict = torch.device(F"cuda:{gpu_id}" )
A__ : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__=0 ):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
A__ : Any = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A__ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
            A__ , A__ : Tuple = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )
# We'll offload the last model manually.
A__ : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self ):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 100 , UpperCamelCase__ = 4.0 , UpperCamelCase__ = 0.3 , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ):
A__ : Any = self._execution_device
A__ : str = guidance_scale > 1.0
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = torch.cat(UpperCamelCase__ , dim=0 )
A__ : Tuple = image_embeds.shape[0]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = torch.cat(UpperCamelCase__ , dim=0 )
if do_classifier_free_guidance:
A__ : List[str] = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
A__ : int = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
A__ : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = [image]
if not all(isinstance(UpperCamelCase__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(UpperCamelCase__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
A__ : str = torch.cat([prepare_image(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for i in image] , dim=0 )
A__ : List[Any] = image.to(dtype=image_embeds.dtype , device=UpperCamelCase__ )
A__ : List[str] = self.movq.encode(UpperCamelCase__ )['''latents''']
A__ : Dict = latents.repeat_interleave(UpperCamelCase__ , dim=0 )
self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ )
        A__ , A__ : Optional[int] = self.get_timesteps(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ : List[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
A__ : Any = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor )
A__ : List[str] = self.prepare_latents(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ )
for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
A__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A__ : List[str] = {'''image_embeds''': image_embeds}
A__ : Optional[int] = self.unet(
sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
if do_classifier_free_guidance:
                A__ , A__ : Any = noise_pred.split(latents.shape[1] , dim=1 )
                A__ , A__ : Optional[int] = noise_pred.chunk(2 )
                A__ , A__ : List[str] = variance_pred.chunk(2 )
A__ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A__ : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
                A__ , A__ : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A__ : str = self.scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0]
# post-processing
A__ : List[Any] = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
A__ : Any = image * 0.5 + 0.5
A__ : List[str] = image.clamp(0 , 1 )
A__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A__ : Tuple = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
| 710
|
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ , A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ , A__ , A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ : Optional[Any] = i
A__ , A__ , A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ , A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ , A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
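# A brute-force cross-check for small n (hypothetical helper, not part of the
# original solution): a(1) = 1 and a(k + 1) = a(k) + digitsum(a(k)).
def solution_brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a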
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
| 0
|
from sklearn.metrics import fa_score
import datasets
_SCREAMING_SNAKE_CASE : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_SCREAMING_SNAKE_CASE : Any = '\nArgs:\n    predictions (`list` of `int`): Predicted labels.\n    references (`list` of `int`): Ground truth labels.\n    labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n        - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n        - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n        - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n        - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n        - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n    sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n    Example 1-A simple binary example\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n        >>> print(results)\n        {\'f1\': 0.5}\n\n    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n        >>> print(round(results[\'f1\'], 2))\n        0.67\n\n    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n        >>> f1_metric = datasets.load_metric("f1")\n        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n        >>> print(round(results[\'f1\'], 2))\n        0.35\n\n    Example 4-A multiclass example, with different values for the `average` input.\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n        >>> print(round(results[\'f1\'], 2))\n        0.33\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n        >>> print(round(results[\'f1\'], 2))\n        0.27\n        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'f1\': array([0.8, 0. , 0. ])}\n'
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=1 , UpperCamelCase__="binary" , UpperCamelCase__=None ):
A__ : Optional[Any] = fa_score(
UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ , pos_label=UpperCamelCase__ , average=UpperCamelCase__ , sample_weight=UpperCamelCase__ )
return {"f1": float(UpperCamelCase__ ) if score.size == 1 else score}
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
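# For example, RUN_SLOW=yes (or any value `strtobool` accepts, e.g. 1/true/on)
# enables the slow tests gated by the flag below.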
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
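# The synchronous wrapper above blocks until the command exits, echoing output
# line by line, and raises a RuntimeError carrying the combined worker stderr
# whenever the return code is non-zero.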
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 0
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
_SCREAMING_SNAKE_CASE : int = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
A__ : Tuple = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
A__ : int = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
A__ : Union[str, Any] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 712
|
import numpy as np
_SCREAMING_SNAKE_CASE : Any = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : List[Any] = np.array(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ , A__ : Any = np.where(letter == self.SQUARE )
A__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : Union[str, Any] = message.replace('''j''' , '''i''' )
A__ : List[Any] = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : Any = self.letter_to_numbers(message[letter_index] )
A__ : Optional[Any] = numbers[0]
A__ : List[str] = numbers[1]
A__ : List[str] = first_step.reshape(2 * len(UpperCamelCase__ ) )
A__ : List[Any] = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Dict = int(second_step[numbers_index * 2] )
A__ : List[str] = int(second_step[(numbers_index * 2) + 1] )
A__ : Dict = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = encoded_message + letter
return encoded_message
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = message.lower()
        A__ : str = message.replace(''' ''' , '''''' )
A__ : List[Any] = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : List[str] = self.letter_to_numbers(message[letter_index] )
A__ : Dict = numbers[0]
A__ : int = numbers[1]
A__ : Optional[Any] = first_step.reshape((2, len(UpperCamelCase__ )) )
A__ : int = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Tuple = int(second_step[0, numbers_index] )
A__ : Dict = int(second_step[1, numbers_index] )
A__ : List[str] = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = decoded_message + letter
return decoded_message
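    # Note: the 5x5 square has no 'j' (encode maps 'j' to 'i') and spaces are
    # stripped, so decoding an encoded message reproduces the original text
    # only up to those substitutions.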
| 55
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = ["pixel_values"]
def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BILINEAR , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 255 , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ):
super().__init__(**UpperCamelCase__ )
A__ : Optional[Any] = size if size is not None else {'''shortest_edge''': 224}
A__ : Optional[int] = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
A__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
A__ : int = get_size_dict(UpperCamelCase__ , param_name='''crop_size''' )
A__ : Union[str, Any] = do_resize
A__ : str = size
A__ : List[str] = resample
A__ : Dict = do_rescale
A__ : Optional[Any] = rescale_factor
A__ : List[str] = do_center_crop
A__ : Tuple = crop_size
A__ : Dict = do_flip_channel_order
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = PIL.Image.BILINEAR , UpperCamelCase__ = None , **UpperCamelCase__ , ):
A__ : Tuple = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
A__ : Any = get_resize_output_image_size(UpperCamelCase__ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
A__ : str = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(UpperCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
return flip_channel_order(UpperCamelCase__ , data_format=UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ):
A__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
A__ : List[Any] = resample if resample is not None else self.resample
A__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
A__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
A__ : List[Any] = size if size is not None else self.size
A__ : Dict = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
A__ : Any = crop_size if crop_size is not None else self.crop_size
A__ : Optional[Any] = get_size_dict(UpperCamelCase__ , param_name='''crop_size''' )
A__ : Optional[int] = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
A__ : List[Any] = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
A__ : List[Any] = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
A__ : Optional[Any] = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
A__ : Union[str, Any] = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
A__ : Dict = [self.flip_channel_order(image=UpperCamelCase__ ) for image in images]
A__ : Optional[Any] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
A__ : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
A__ : List[str] = target_sizes.numpy()
A__ : int = []
for idx in range(len(UpperCamelCase__ ) ):
A__ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
A__ : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
A__ : Dict = logits.argmax(dim=1 )
A__ : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
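# A minimal usage sketch (illustrative only; the class subclasses
# BaseImageProcessor, whose __call__ dispatches to the preprocess method above):
# processor = UpperCamelCase__(size={"shortest_edge": 224})
# batch = processor(images=pil_image, return_tensors="pt")  # -> {"pixel_values": ...}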
| 713
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Optional[Any] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
A__ : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A__ : str = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
A__ : Dict = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_6000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
A__ : Optional[int] = tempfile.mkdtemp()
A__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A__ : str = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
# load decoder from hub
A__ : Any = '''hf-internal-testing/ngram-beam-search-decoder'''
def __snake_case ( self , **UpperCamelCase__ ):
A__ : int = self.add_kwargs_tokens_map.copy()
kwargs.update(UpperCamelCase__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **UpperCamelCase__ )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
A__ : Any = self.get_tokenizer()
A__ : Any = self.get_feature_extractor()
A__ : List[str] = self.get_decoder()
A__ : Dict = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
A__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
A__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self ):
A__ : List[str] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(UpperCamelCase__ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=UpperCamelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __snake_case ( self ):
A__ : str = self.get_feature_extractor()
A__ : Dict = self.get_tokenizer()
A__ : str = self.get_decoder()
A__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Tuple = floats_list((3, 1000) )
A__ : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' )
A__ : int = processor(UpperCamelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self ):
A__ : List[Any] = self.get_feature_extractor()
A__ : Optional[Any] = self.get_tokenizer()
A__ : List[Any] = self.get_decoder()
A__ : Dict = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Union[str, Any] = '''This is a test string'''
A__ : Optional[Any] = processor(text=UpperCamelCase__ )
A__ : str = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self , UpperCamelCase__=(2, 10, 16) , UpperCamelCase__=77 ):
np.random.seed(UpperCamelCase__ )
return np.random.rand(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[int] = self.get_feature_extractor()
A__ : Dict = self.get_tokenizer()
A__ : Optional[Any] = self.get_decoder()
A__ : int = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Any = self._get_dummy_logits(shape=(10, 16) , seed=13 )
A__ : str = processor.decode(UpperCamelCase__ )
A__ : List[Any] = decoder.decode_beams(UpperCamelCase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __snake_case ( self , UpperCamelCase__ ):
A__ : int = self.get_feature_extractor()
A__ : int = self.get_tokenizer()
A__ : int = self.get_decoder()
A__ : str = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Optional[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A__ : Tuple = processor.batch_decode(UpperCamelCase__ )
else:
with get_context(UpperCamelCase__ ).Pool() as pool:
A__ : List[Any] = processor.batch_decode(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = list(UpperCamelCase__ )
with get_context('''fork''' ).Pool() as p:
A__ : str = decoder.decode_beams_batch(UpperCamelCase__ , UpperCamelCase__ )
        A__ , A__ , A__ : int = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(UpperCamelCase__ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(UpperCamelCase__ , decoded_processor.logit_score )
self.assertListEqual(UpperCamelCase__ , decoded_processor.lm_score )
def __snake_case ( self ):
A__ : List[Any] = self.get_feature_extractor()
A__ : List[str] = self.get_tokenizer()
A__ : Dict = self.get_decoder()
A__ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : Optional[int] = self._get_dummy_logits()
A__ : Dict = 15
A__ : Union[str, Any] = -20.0
A__ : List[Any] = -4.0
A__ : List[Any] = processor.batch_decode(
UpperCamelCase__ , beam_width=UpperCamelCase__ , beam_prune_logp=UpperCamelCase__ , token_min_logp=UpperCamelCase__ , )
A__ : Optional[Any] = decoded_processor_out.text
A__ : str = list(UpperCamelCase__ )
with get_context('''fork''' ).Pool() as pool:
A__ : Dict = decoder.decode_beams_batch(
UpperCamelCase__ , UpperCamelCase__ , beam_width=UpperCamelCase__ , beam_prune_logp=UpperCamelCase__ , token_min_logp=UpperCamelCase__ , )
A__ : List[str] = [d[0][0] for d in decoded_decoder_out]
A__ : Any = [d[0][2] for d in decoded_decoder_out]
A__ : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , UpperCamelCase__ )
self.assertTrue(np.array_equal(UpperCamelCase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , UpperCamelCase__ , atol=1e-3 ) )
self.assertTrue(np.array_equal(UpperCamelCase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , UpperCamelCase__ , atol=1e-3 ) )
def __snake_case ( self ):
A__ : Optional[int] = self.get_feature_extractor()
A__ : Tuple = self.get_tokenizer()
A__ : Optional[int] = self.get_decoder()
A__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A__ : int = self._get_dummy_logits()
A__ : Optional[Any] = 2.0
A__ : Union[str, Any] = 5.0
A__ : Tuple = -20.0
A__ : Tuple = True
A__ : int = processor.batch_decode(
UpperCamelCase__ , alpha=UpperCamelCase__ , beta=UpperCamelCase__ , unk_score_offset=UpperCamelCase__ , lm_score_boundary=UpperCamelCase__ , )
A__ : Any = decoded_processor_out.text
A__ : int = list(UpperCamelCase__ )
decoder.reset_params(
alpha=UpperCamelCase__ , beta=UpperCamelCase__ , unk_score_offset=UpperCamelCase__ , lm_score_boundary=UpperCamelCase__ , )
with get_context('''fork''' ).Pool() as pool:
A__ : Optional[int] = decoder.decode_beams_batch(
UpperCamelCase__ , UpperCamelCase__ , )
A__ : List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , UpperCamelCase__ )
A__ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : Any = processor.decoder.model_container[processor.decoder._model_key]
A__ : Optional[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
A__ : Optional[Any] = os.listdir(UpperCamelCase__ )
A__ : Tuple = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
A__ : Dict = WavaVecaProcessorWithLM.from_pretrained(UpperCamelCase__ )
A__ : List[str] = processor.decoder.model_container[processor.decoder._model_key]
A__ : int = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
A__ : Tuple = os.listdir(UpperCamelCase__ )
A__ : int = os.listdir(UpperCamelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : Any = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : Dict = floats_list((3, 1000) )
A__ : List[Any] = processor_wavaveca(UpperCamelCase__ , return_tensors='''np''' )
A__ : Tuple = processor_auto(UpperCamelCase__ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
A__ : int = self._get_dummy_logits()
A__ : str = processor_wavaveca.batch_decode(UpperCamelCase__ )
A__ : Union[str, Any] = processor_auto.batch_decode(UpperCamelCase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __snake_case ( self ):
A__ : Any = self.get_feature_extractor()
A__ : List[str] = self.get_tokenizer()
A__ : int = self.get_decoder()
A__ : List[Any] = WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = [d[key] for d in offsets]
return retrieved_list
def __snake_case ( self ):
A__ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : str = self._get_dummy_logits()[0]
A__ : str = processor.decode(UpperCamelCase__ , output_word_offsets=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __snake_case ( self ):
A__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A__ : Optional[int] = self._get_dummy_logits()
A__ : Any = processor.batch_decode(UpperCamelCase__ , output_word_offsets=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __snake_case ( self ):
import torch
A__ : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=UpperCamelCase__ )
A__ : str = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) )
A__ : int = iter(UpperCamelCase__ )
A__ : Dict = next(UpperCamelCase__ )
A__ : str = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
A__ : Optional[Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A__ : Union[str, Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
A__ : Union[str, Any] = model(UpperCamelCase__ ).logits.cpu().numpy()
A__ : Tuple = processor.decode(logits[0] , output_word_offsets=UpperCamelCase__ )
A__ : str = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A__ : List[Any] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
A__ : Union[str, Any] = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) , UpperCamelCase__ )
self.assertEqual(''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) , output.text )
# output times
A__ : List[str] = torch.tensor(self.get_from_offsets(UpperCamelCase__ , '''start_time''' ) )
A__ : Tuple = torch.tensor(self.get_from_offsets(UpperCamelCase__ , '''end_time''' ) )
# fmt: off
A__ : str = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
A__ : List[str] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=0.0_1 ) )
| 714
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
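# A minimal sketch of the distributed-evaluation pattern this script builds up to
# (assuming a prepared model and eval_dataloader; `Accelerator.gather_for_metrics`
# is the one-call alternative to the manual truncation shown further below):
#
#     for batch in eval_dataloader:
#         with torch.no_grad():
#             logits = model(**batch).logits
#         preds, refs = accelerator.gather_for_metrics((logits.argmax(dim=-1), batch["labels"]))
#         metric.add_batch(predictions=preds, references=refs)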
_SCREAMING_SNAKE_CASE : str = 1_6
_SCREAMING_SNAKE_CASE : Tuple = 3_2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
A__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ : Optional[int] = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we pad to a round multiple of 16 (for fp8) or 8 (for fp16/bf16)
if accelerator.mixed_precision == "fp8":
A__ : int = 16
elif accelerator.mixed_precision != "no":
A__ : Any = 8
else:
A__ : Union[str, Any] = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
A__ : List[str] = 2
# Initialize accelerator
A__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config['''lr''']
A__ : Dict = int(config['''num_epochs'''] )
A__ : int = int(config['''seed'''] )
A__ : Optional[Any] = int(config['''batch_size'''] )
A__ : int = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
A__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
A__ : Dict = MAX_GPU_BATCH_SIZE
set_seed(__UpperCamelCase )
A__ , A__ : int = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
A__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
A__ : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ : Dict = model(**__UpperCamelCase )
A__ : Dict = outputs.loss
A__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
A__ : Optional[int] = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ : Union[str, Any] = model(**__UpperCamelCase )
A__ : int = outputs.logits.argmax(dim=-1 )
A__ , A__ : Optional[Any] = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCamelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
A__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A__ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
A__ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ : Dict = parser.parse_args()
A__ : Any = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 55
| 0
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = (CMStochasticIterativeScheduler,)
_lowerCAmelCase = 10
def __snake_case ( self , **UpperCamelCase__ ):
A__ : str = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 80.0,
}
config.update(**UpperCamelCase__ )
return config
def __snake_case ( self ):
A__ : Tuple = 10
A__ : Any = self.get_scheduler_config()
A__ : Tuple = self.scheduler_classes[0](**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
A__ : Dict = scheduler.timesteps[0]
A__ : Dict = scheduler.timesteps[1]
A__ : List[str] = self.dummy_sample
A__ : str = 0.1 * sample
A__ : Dict = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
A__ : int = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def __snake_case ( self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[Any] = self.scheduler_classes[0]
A__ : Dict = self.get_scheduler_config()
A__ : List[Any] = scheduler_class(**UpperCamelCase__ )
A__ : Optional[Any] = 1
scheduler.set_timesteps(UpperCamelCase__ )
A__ : Optional[int] = scheduler.timesteps
A__ : Tuple = torch.manual_seed(0 )
A__ : Optional[int] = self.dummy_model()
A__ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(UpperCamelCase__ ):
# 1. scale model input
A__ : Tuple = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict noise residual
A__ : Dict = model(UpperCamelCase__ , UpperCamelCase__ )
# 3. predict previous sample x_t-1
A__ : Optional[int] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
A__ : Any = pred_prev_sample
A__ : Tuple = torch.sum(torch.abs(UpperCamelCase__ ) )
A__ : str = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3
def __snake_case ( self ):
A__ : Optional[int] = self.scheduler_classes[0]
A__ : Any = self.get_scheduler_config()
A__ : str = scheduler_class(**UpperCamelCase__ )
A__ : Any = [106, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
A__ : int = scheduler.timesteps
A__ : Optional[int] = torch.manual_seed(0 )
A__ : int = self.dummy_model()
A__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A__ : Any = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict noise residual
A__ : Optional[Any] = model(UpperCamelCase__ , UpperCamelCase__ )
# 3. predict previous sample x_t-1
A__ : Union[str, Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
A__ : Tuple = pred_prev_sample
A__ : List[Any] = torch.sum(torch.abs(UpperCamelCase__ ) )
A__ : str = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3
def __snake_case ( self ):
A__ : Optional[int] = self.scheduler_classes[0]
A__ : str = self.get_scheduler_config()
A__ : int = scheduler_class(**UpperCamelCase__ )
A__ : Optional[Any] = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCamelCase__ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[Any] = self.scheduler_classes[0]
A__ : Tuple = self.get_scheduler_config()
A__ : Optional[int] = scheduler_class(**UpperCamelCase__ )
A__ : Optional[int] = [39, 30, 12, 1, 0]
A__ : Tuple = len(UpperCamelCase__ )
with self.assertRaises(UpperCamelCase__ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[str] = self.scheduler_classes[0]
A__ : int = self.get_scheduler_config()
A__ : Union[str, Any] = scheduler_class(**UpperCamelCase__ )
A__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase__ , msg=F"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCamelCase__ )
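# A sampling-loop sketch mirroring the denoising loops exercised in the tests above
# (hypothetical `model` and `shape`; only the scheduler calls used in these tests appear):
#
#     scheduler.set_timesteps(num_inference_steps=40)
#     sample = torch.randn(shape) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         residual = model(model_input, t)
#         sample = scheduler.step(residual, t, sample).prev_sample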
| 715
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "microsoft/speecht5_tts"
_lowerCAmelCase = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_lowerCAmelCase = "text_reader"
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ["text"]
_lowerCAmelCase = ["audio"]
def __snake_case ( self ):
if self.post_processor is None:
A__ : int = '''microsoft/speecht5_hifigan'''
super().setup()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : List[Any] = self.pre_processor(text=UpperCamelCase__ , return_tensors='''pt''' , truncation=UpperCamelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
A__ : List[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
A__ : Dict = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.model.generate_speech(**UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.post_processor(UpperCamelCase__ ).cpu().detach()
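# Hypothetical usage sketch (assumes the tools runtime can download the checkpoints;
# the tool is registered under the name `text_reader`):
#
#     tool = UpperCamelCase__()
#     audio = tool("Hello, this is a test.")  # 1-D waveform tensor at 16 kHz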
| 55
| 0
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
A__ : List[Any] = set()
# Replace all the whitespace in our sentence
__UpperCamelCase = __UpperCamelCase.replace(''' ''' , '''''' )
for alpha in __UpperCamelCase:
if "a" <= alpha.lower() <= "z":
A__.add(alpha.lower() )
return len(A__ ) == 26
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
A__ : int = [False] * 26
for char in __UpperCamelCase:
if char.islower():
A__[ord(char ) - ord('''a''' )] = True
elif char.isupper():
A__[ord(char ) - ord('''A''' )] = True
return all(A__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
return len({char for char in __UpperCamelCase.lower() if char.isalpha()} ) == 26
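# Quick sanity checks (the benchmark below refers to the original names
# `is_pangram`, `is_pangram_faster` and `is_pangram_fastest`):
# >>> is_pangram()  # the default sentence is a pangram
# True
# >>> is_pangram("My name is Unknown")
# False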
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
from timeit import timeit
A__ : str = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=__UpperCamelCase ) )
print(timeit('''is_pangram_faster()''' , setup=__UpperCamelCase ) )
print(timeit('''is_pangram_fastest()''' , setup=__UpperCamelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 716
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
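# Hypothetical quick check (requires network access to fetch the tokenizer files):
#
#     tok = UpperCamelCase__.from_pretrained("EleutherAI/gpt-neox-20b")
#     tok("Hello world")["input_ids"]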
| 55
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any]=False ):
"""simple docstring"""
A__ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any]=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A__ : Dict = ''''''
else:
A__ : Any = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ : Optional[Any] = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
A__ : Tuple = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
A__ : Dict = in_proj_weight[
: config.hidden_size, :
]
A__ : List[Any] = in_proj_bias[: config.hidden_size]
A__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
A__ : Dict = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : Any = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int ):
"""simple docstring"""
A__ : List[Any] = dct.pop(__UpperCamelCase )
A__ : List[Any] = val
def SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ : Dict = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : str ):
"""simple docstring"""
A__ : List[str] = ViTConfig()
A__ : Union[str, Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A__ : Optional[int] = True
A__ : int = int(vit_name[-12:-10] )
A__ : List[Any] = int(vit_name[-9:-6] )
else:
A__ : Any = 10_00
A__ : str = '''huggingface/label-files'''
A__ : Optional[Any] = '''imagenet-1k-id2label.json'''
A__ : Any = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
A__ : Dict = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ : int = idalabel
A__ : List[str] = {v: k for k, v in idalabel.items()}
A__ : List[str] = int(vit_name[-6:-4] )
A__ : Any = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
A__ : List[str] = 1_92
A__ : Optional[Any] = 7_68
A__ : Optional[int] = 12
A__ : Tuple = 3
elif vit_name[9:].startswith('''small''' ):
A__ : int = 3_84
A__ : Union[str, Any] = 15_36
A__ : Union[str, Any] = 12
A__ : List[str] = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
A__ : List[Any] = 7_68
A__ : Tuple = 23_04
A__ : List[str] = 8
A__ : Union[str, Any] = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
A__ : Optional[int] = 10_24
A__ : List[Any] = 40_96
A__ : List[str] = 24
A__ : Optional[Any] = 16
elif vit_name[4:].startswith('''huge''' ):
A__ : Optional[Any] = 12_80
A__ : Dict = 51_20
A__ : Dict = 32
A__ : Optional[Any] = 16
# load original model from timm
A__ : Optional[int] = timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
A__ : List[Any] = create_rename_keys(__UpperCamelCase , __UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A__ : Optional[Any] = ViTModel(__UpperCamelCase ).eval()
else:
A__ : List[str] = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A__ : Optional[Any] = DeiTImageProcessor(size=config.image_size )
else:
A__ : int = ViTImageProcessor(size=config.image_size )
A__ : List[str] = image_processor(images=prepare_img() , return_tensors='''pt''' )
A__ : Dict = encoding['''pixel_values''']
A__ : List[str] = model(__UpperCamelCase )
if base_model:
A__ : List[Any] = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase , outputs.pooler_output , atol=1e-3 )
else:
A__ : Optional[int] = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase , outputs.logits , atol=1e-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 717
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
| 55
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ShapEPipeline
_lowerCAmelCase = ["prompt"]
_lowerCAmelCase = ["prompt"]
_lowerCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_lowerCAmelCase = False
@property
def __snake_case ( self ):
return 32
@property
def __snake_case ( self ):
return 32
@property
def __snake_case ( self ):
return self.time_input_dim * 4
@property
def __snake_case ( self ):
return 8
@property
def __snake_case ( self ):
A__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
A__ : int = PriorTransformer(**UpperCamelCase__ )
return model
@property
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : int = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
A__ : Any = ShapERenderer(**UpperCamelCase__ )
return model
def __snake_case ( self ):
A__ : Optional[int] = self.dummy_prior
A__ : Dict = self.dummy_text_encoder
A__ : Optional[int] = self.dummy_tokenizer
A__ : Optional[int] = self.dummy_renderer
A__ : List[str] = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase__ , clip_sample=UpperCamelCase__ , clip_sample_range=1.0 , )
A__ : Optional[Any] = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
if str(UpperCamelCase__ ).startswith('''mps''' ):
A__ : Optional[int] = torch.manual_seed(UpperCamelCase__ )
else:
A__ : Union[str, Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A__ : List[Any] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __snake_case ( self ):
A__ : Tuple = '''cpu'''
A__ : str = self.get_dummy_components()
A__ : Any = self.pipeline_class(**UpperCamelCase__ )
A__ : Any = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A__ : Optional[Any] = output.images[0]
A__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
A__ : Optional[Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __snake_case ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __snake_case ( self ):
A__ : Dict = torch_device == '''cpu'''
A__ : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , )
def __snake_case ( self ):
A__ : Dict = self.get_dummy_components()
A__ : Dict = self.pipeline_class(**UpperCamelCase__ )
A__ : Optional[int] = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Optional[int] = 1
A__ : List[Any] = 2
A__ : Any = self.get_dummy_inputs(UpperCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
A__ : List[str] = batch_size * [inputs[key]]
A__ : Optional[int] = pipe(**UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
A__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
A__ : Dict = ShapEPipeline.from_pretrained('''openai/shap-e''' )
A__ : Any = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : Union[str, Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
A__ : str = pipe(
'''a shark''' , generator=UpperCamelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 718
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = 0
A__ : Optional[Any] = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ : Optional[int] = len(__UpperCamelCase ) // 2
A__ : List[str] = arr[0:mid]
A__ : Union[str, Any] = arr[mid:]
A__ , A__ : List[Any] = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
A__ : str = []
A__ : Tuple = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
# These are all inversions. The claim follows from the
# property that p is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
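# Worked example (hypothetical call): merging p=[10, 11] with q=[1, 2, 5] yields
# r=[1, 2, 5, 10, 11] and 6 cross inversions, since every element of q is smaller
# than both elements of p (2 * 3 = 6).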
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A__ : int = count_inversions_bf(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A__ : Optional[Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Dict = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
# an empty list should also have zero inversions
A__ : Union[str, Any] = []
A__ : Union[str, Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Any = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
if __name__ == "__main__":
main()
| 55
| 0
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_SCREAMING_SNAKE_CASE : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase__ ( datasets.BuilderConfig ):
'''simple docstring'''
_lowerCAmelCase = 10_000
_lowerCAmelCase = None
_lowerCAmelCase = None
class UpperCamelCase__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
_lowerCAmelCase = ParquetConfig
def __snake_case ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __snake_case ( self , UpperCamelCase__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
A__ : Tuple = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase__ , (str, list, tuple) ):
A__ : Optional[int] = data_files
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A__ : Optional[int] = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
A__ : Dict = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
A__ : int = [dl_manager.iter_files(UpperCamelCase__ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(UpperCamelCase__ ):
with open(UpperCamelCase__ , '''rb''' ) as f:
A__ : List[Any] = datasets.Features.from_arrow_schema(pq.read_schema(UpperCamelCase__ ) )
break
splits.append(datasets.SplitGenerator(name=UpperCamelCase__ , gen_kwargs={'''files''': files} ) )
return splits
def __snake_case ( self , UpperCamelCase__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ : str = table_cast(UpperCamelCase__ , self.info.features.arrow_schema )
return pa_table
def __snake_case ( self , UpperCamelCase__ ):
A__ : Dict = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase__ ) ):
with open(UpperCamelCase__ , '''rb''' ) as f:
A__ : Dict = pq.ParquetFile(UpperCamelCase__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
A__ : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(UpperCamelCase__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(UpperCamelCase__ )}: {e}" )
raise
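# Hypothetical end-to-end usage (this builder backs the packaged "parquet" loader):
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "data/*.parquet"}, split="train")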
| 719
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( img : Image , level : float ) -> Image:
"""simple docstring"""
def brightness(c : int ) -> float:
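# shift every channel value by `level`; PIL evaluates this function once per
# possible 8-bit value to build a lookup table, clamping out-of-range results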
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = SCREAMING_SNAKE_CASE(img, 1_0_0)
_SCREAMING_SNAKE_CASE.save('image_data/lena_brightness.png', format='png')
| 55
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=2 , UpperCamelCase__=99 , UpperCamelCase__=0 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__="last" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=0 , ):
A__ : str = parent
A__ : Tuple = batch_size
A__ : str = seq_length
A__ : List[str] = is_training
A__ : int = use_input_lengths
A__ : Optional[Any] = use_token_type_ids
A__ : Tuple = use_labels
A__ : Union[str, Any] = gelu_activation
A__ : List[Any] = sinusoidal_embeddings
A__ : List[str] = causal
A__ : Optional[int] = asm
A__ : Dict = n_langs
A__ : Optional[int] = vocab_size
A__ : Optional[Any] = n_special
A__ : str = hidden_size
A__ : int = num_hidden_layers
A__ : Union[str, Any] = num_attention_heads
A__ : Union[str, Any] = hidden_dropout_prob
A__ : str = attention_probs_dropout_prob
A__ : Dict = max_position_embeddings
A__ : int = type_sequence_label_size
A__ : Optional[Any] = initializer_range
A__ : Dict = num_labels
A__ : Optional[Any] = num_choices
A__ : Tuple = summary_type
A__ : Any = use_proj
A__ : List[Any] = scope
A__ : List[str] = bos_token_id
def __snake_case ( self ):
A__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : List[str] = None
if self.use_input_lengths:
A__ : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A__ : Union[str, Any] = None
if self.use_token_type_ids:
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A__ : Any = None
A__ : Any = None
A__ : str = None
if self.use_labels:
A__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Tuple = ids_tensor([self.batch_size] , 2 ).float()
A__ : Any = ids_tensor([self.batch_size] , self.num_choices )
A__ : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __snake_case ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
A__ : int = XLMModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ , lengths=UpperCamelCase__ , langs=UpperCamelCase__ )
A__ : str = model(UpperCamelCase__ , langs=UpperCamelCase__ )
A__ : str = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
A__ : Optional[Any] = XLMWithLMHeadModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Tuple = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
A__ : List[str] = XLMForQuestionAnsweringSimple(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[int] = model(UpperCamelCase__ )
A__ : Optional[Any] = model(UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ )
A__ : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
A__ : List[str] = XLMForQuestionAnswering(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
A__ : List[Any] = model(
UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , cls_index=UpperCamelCase__ , is_impossible=UpperCamelCase__ , p_mask=UpperCamelCase__ , )
A__ : int = model(
UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , cls_index=UpperCamelCase__ , is_impossible=UpperCamelCase__ , )
(A__ ) : Union[str, Any] = result_with_labels.to_tuple()
A__ : str = model(UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ )
(A__ ) : List[str] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
A__ : Any = XLMForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Tuple = model(UpperCamelCase__ )
A__ : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
A__ : List[str] = self.num_labels
A__ : Union[str, Any] = XLMForTokenClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ):
A__ : int = self.num_choices
A__ : Any = XLMForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ : Optional[int] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self ):
A__ : Dict = self.prepare_config_and_inputs()
(
A__
) : Optional[int] = config_and_inputs
A__ : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCAmelCase = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Union[str, Any] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A__ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
A__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def __snake_case ( self ):
A__ : Optional[Any] = XLMModelTester(self )
A__ : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , emb_dim=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCamelCase__ )
    def __snake_case ( self , batch_size , attentions , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
    def __snake_case ( self , batch_size , hidden_states , min_length , max_length , config , use_cache=False , num_beam_groups=1 ):
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
def __snake_case ( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Dict = XLMModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ):
A__ : str = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(UpperCamelCase__ )
A__ : Union[str, Any] = torch.tensor([[14, 447]] , dtype=torch.long , device=UpperCamelCase__ ) # the president
A__ : Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A__ : Tuple = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCamelCase__ )
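# Hedged sketch of what `generate(..., do_sample=False)` does in the test above:
# plain greedy decoding. `logits_fn` stands in for a model forward pass (with a
# real model you would take `model(input_ids).logits`); illustrative, not the XLM API.
def _greedy_decode(logits_fn, input_ids, max_new_tokens):
    import torch

    for _ in range(max_new_tokens):
        logits = logits_fn(input_ids)  # (batch, seq_len, vocab_size)
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)  # most likely next token
        input_ids = torch.cat([input_ids, next_token], dim=-1)  # append and repeat
    return input_ids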
| 720
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
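# Hedged sketch of the JSON round-trip the tests above verify, using plain json:
# serialize a config dict to disk, read it back, and check equality. A stand-in
# for the to_json_file/from_json_file pair, not the transformers API itself.
def _json_roundtrip_equal(cfg):
    with tempfile.TemporaryDirectory() as tmpdirname:
        path = os.path.join(tmpdirname, "feat_extract.json")
        with open(path, "w", encoding="utf-8") as f:
            json.dump(cfg, f, indent=2, sort_keys=True)  # stable, human-readable layout
        with open(path, encoding="utf-8") as f:
            return json.load(f) == cfg

assert _json_roundtrip_equal({"feature_size": 80, "sampling_rate": 16000})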
| 55
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
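# Hedged sketch of the deferred-import mechanism _LazyModule relies on: PEP 562's
# module-level __getattr__ lets attribute access trigger the real import. Toy
# version only; the actual _LazyModule also replaces the module in sys.modules
# and handles submodules and dir().
def _lazy_getattr(name, import_structure):
    import importlib

    for module_name, names in import_structure.items():
        if name in names:
            module = importlib.import_module(module_name)  # imported on first access only
            return getattr(module, name)
    raise AttributeError(name)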
| 721
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def __snake_case ( self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 0
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = RoCBertTokenizer
_lowerCAmelCase = None
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = filter_non_english
def __snake_case ( self ):
super().setUp()
A__ : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
A__ : int = {}
A__ : Dict = {}
for i, value in enumerate(UpperCamelCase__ ):
A__ : int = i
A__ : Any = i
A__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
A__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
A__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
A__ : Tuple = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCamelCase__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__ ) , [5, 6, 2, 5, 7, 8] )
def __snake_case ( self ):
A__ : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __snake_case ( self ):
A__ : List[str] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self ):
A__ : str = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __snake_case ( self ):
A__ : str = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self ):
A__ : int = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __snake_case ( self ):
A__ : List[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self ):
A__ : Any = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self ):
A__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __snake_case ( self ):
A__ : Dict = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __snake_case ( self ):
A__ : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
A__ : Union[str, Any] = {}
for i, token in enumerate(UpperCamelCase__ ):
A__ : List[str] = i
A__ : Dict = RoCBertWordpieceTokenizer(vocab=UpperCamelCase__ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __snake_case ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __snake_case ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __snake_case ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __snake_case ( self ):
A__ : Tuple = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCamelCase__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
A__ : Optional[int] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCamelCase__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def __snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[int] = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
A__ : Dict = tokenizer_r.encode_plus(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , )
A__ : Tuple = tokenizer_r.do_lower_case if hasattr(UpperCamelCase__ , '''do_lower_case''' ) else False
A__ : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __snake_case ( self ):
A__ : int = ['''的''', '''人''', '''有''']
A__ : Tuple = ''''''.join(UpperCamelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A__ : List[str] = True
A__ : List[str] = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : List[str] = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : str = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
A__ : int = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[int] = False
A__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
A__ : Any = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : Dict = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : Dict = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__ )
A__ : str = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
A__ : Tuple = [
F"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCamelCase__ )
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def __snake_case ( self ):
A__ : int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
A__ : str = tokenizer.encode('''你好''' , add_special_tokens=UpperCamelCase__ )
A__ : List[str] = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCamelCase__ )
A__ : Dict = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
A__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __snake_case ( self ):
A__ : Tuple = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
A__ : Union[str, Any] = '''你好,你是谁'''
A__ : Optional[int] = tokenizer.tokenize(UpperCamelCase__ )
A__ : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
A__ : int = tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__ )
A__ : Any = tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__ )
A__ : Dict = tokenizer.prepare_for_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A__ : Any = tokenizer.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
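# Hedged sketch of the greedy longest-match-first WordPiece algorithm that the
# RoCBertWordpieceTokenizer tests above exercise (simplified: no max-length guard).
def _wordpiece(word, vocab, unk="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:  # shrink the candidate until it is in the vocab
            candidate = ("##" + word[start:end]) if start > 0 else word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk]  # one unmatchable span marks the whole word unknown
        tokens.append(piece)
        start = end
    return tokens

assert _wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert _wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]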
| 700
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        # the original checks compared each dim against itself (always divisible);
        # the intended invariant is divisibility by the corresponding head width
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
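# Hedged sketch of the nested-config pattern used above: a sub-config arrives as an
# instance or a plain dict, and to_dict() serializes it recursively. Generic toy
# illustration, not the EsmConfig API itself.
@dataclass
class _ToySubConfig:
    max_recycles: int = 4

class _ToyConfig:
    def __init__(self, sub=None):
        if isinstance(sub, dict):
            sub = _ToySubConfig(**sub)  # plain dicts are upgraded to the dataclass
        self.sub = sub if sub is not None else _ToySubConfig()

    def to_dict(self):
        output = dict(self.__dict__)
        output["sub"] = asdict(self.sub)  # recurse into the nested config
        return output

assert _ToyConfig({"max_recycles": 2}).to_dict() == {"sub": {"max_recycles": 2}}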
| 55
| 0
|
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A__ : Optional[Any] = i
A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 701
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 0
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = ["speech"]
    def __init__( self , *args , **kwargs ):
requires_backends(self , ['''speech'''] )
class UpperCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = ["speech"]
    def __init__( self , *args , **kwargs ):
requires_backends(self , ['''speech'''] )
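# Hedged sketch of the dummy-object pattern above: a placeholder class raises a
# helpful ImportError at construction time when its optional backend is missing.
# Simplified stand-in for requires_backends; 'some_optional_backend' is made up.
import importlib.util

def _requires_backends_sketch(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the missing backend(s): {missing}")

class _DummySpeechObject:
    _backends = ["some_optional_backend"]  # hypothetical module name for illustration

    def __init__(self, *args, **kwargs):
        _requires_backends_sketch(self, self._backends)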
| 702
|
def SCREAMING_SNAKE_CASE ( ) -> str:
    """simple docstring"""
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
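# Hedged alternative: only the last ten digits matter, so the sum can be taken
# modulo 10**10 with three-argument pow(), avoiding huge intermediate integers.
def _solution_mod():
    mod = 10**10
    total = sum(pow(i, i, mod) for i in range(1, 1001)) % mod
    return str(total).zfill(10)  # pad in case the 10-digit tail has leading zeros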
| 55
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "upernet"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=512 , UpperCamelCase__=0.0_2 , UpperCamelCase__=[1, 2, 3, 6] , UpperCamelCase__=True , UpperCamelCase__=0.4 , UpperCamelCase__=384 , UpperCamelCase__=256 , UpperCamelCase__=1 , UpperCamelCase__=False , UpperCamelCase__=255 , **UpperCamelCase__ , ):
super().__init__(**UpperCamelCase__ )
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : Optional[int] = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = backbone_config.get('''model_type''' )
A__ : Dict = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[Any] = config_class.from_dict(UpperCamelCase__ )
A__ : Union[str, Any] = backbone_config
A__ : List[str] = hidden_size
A__ : Tuple = initializer_range
A__ : Optional[int] = pool_scales
A__ : Tuple = use_auxiliary_head
A__ : Optional[Any] = auxiliary_loss_weight
A__ : Optional[int] = auxiliary_in_channels
A__ : Union[str, Any] = auxiliary_channels
A__ : Tuple = auxiliary_num_convs
A__ : List[Any] = auxiliary_concat_input
A__ : Optional[int] = loss_ignore_index
def __snake_case ( self ):
A__ : Optional[int] = copy.deepcopy(self.__dict__ )
A__ : Optional[Any] = self.backbone_config.to_dict()
A__ : Any = self.__class__.model_type
return output
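# Hedged usage sketch (assumes a transformers install exposing UperNetConfig, which
# the class above corresponds to): with no backbone_config it falls back to a ResNet
# backbone, and to_dict() embeds the backbone's own dictionary.
def _example_default_backbone():
    from transformers import UperNetConfig  # deferred so the sketch stays optional

    config = UperNetConfig()
    return config.to_dict()["backbone_config"]["model_type"]  # expected: "resnet"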
| 703
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
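# Hedged sketch of the multi-GPU launch performed above: build a torchrun command
# for the local device count and run it with OMP_NUM_THREADS pinned to 1, which is
# what patch_environment arranges. Plain-subprocess stand-in, not accelerate's API.
def _launch_distributed(script_path, nproc):
    import subprocess

    cmd = ["torchrun", f"--nproc_per_node={nproc}", script_path]
    env = dict(os.environ, OMP_NUM_THREADS="1")  # mirrors patch_environment(omp_num_threads=1)
    subprocess.run(cmd, env=env, check=True)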
| 55
| 0
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : List[str] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
                A__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ):
A__ : Tuple = parent
A__ : Tuple = batch_size
A__ : Tuple = seq_length
A__ : Union[str, Any] = is_training
A__ : Any = use_input_mask
A__ : Union[str, Any] = use_token_type_ids
A__ : Optional[Any] = use_labels
A__ : Dict = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : str = num_hidden_layers
A__ : List[str] = num_attention_heads
A__ : List[str] = intermediate_size
A__ : Union[str, Any] = hidden_act
A__ : int = hidden_dropout_prob
A__ : List[str] = attention_probs_dropout_prob
A__ : Tuple = max_position_embeddings
A__ : Union[str, Any] = type_vocab_size
A__ : List[str] = type_sequence_label_size
A__ : Union[str, Any] = initializer_range
A__ : List[Any] = num_labels
A__ : List[str] = num_choices
A__ : Dict = scope
A__ : List[Any] = embedding_size
def __snake_case ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Optional[Any] = None
if self.use_input_mask:
A__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Optional[int] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Optional[int] = None
A__ : Union[str, Any] = None
A__ : Optional[int] = None
if self.use_labels:
A__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Any = ids_tensor([self.batch_size] , self.num_choices )
A__ : Dict = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = TFMobileBertModel(config=UpperCamelCase__ )
A__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : List[str] = model(UpperCamelCase__ )
A__ : List[Any] = [input_ids, input_mask]
A__ : Optional[Any] = model(UpperCamelCase__ )
A__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : int = TFMobileBertForMaskedLM(config=UpperCamelCase__ )
A__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=UpperCamelCase__ )
A__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : int = TFMobileBertForPreTraining(config=UpperCamelCase__ )
A__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : int = model(UpperCamelCase__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = self.num_labels
A__ : int = TFMobileBertForSequenceClassification(config=UpperCamelCase__ )
A__ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = self.num_choices
A__ : str = TFMobileBertForMultipleChoice(config=UpperCamelCase__ )
A__ : Any = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A__ : Dict = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A__ : List[Any] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A__ : Tuple = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = self.num_labels
A__ : Optional[int] = TFMobileBertForTokenClassification(config=UpperCamelCase__ )
A__ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = TFMobileBertForQuestionAnswering(config=UpperCamelCase__ )
A__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self ):
A__ : List[str] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs  # restored the collapsed tuple unpacking; names match their use below
A__ : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def __snake_case ( self ):
A__ : str = TFMobileBertModelTest.TFMobileBertModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
A__ : int = TFMobileBertModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ):
A__ : Union[str, Any] = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
A__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ : List[str] = model(UpperCamelCase__ )[0]
A__ : int = [1, 6, 3_0522]
self.assertEqual(output.shape , UpperCamelCase__ )
A__ : Union[str, Any] = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 )
| 704
|
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Gaussian (normal) probability density of x for mean mu and std dev sigma."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
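# Sanity check: the standard normal density at x = 0 is 1 / sqrt(2 * pi),
# so gaussian(0) evaluates to about 0.39894.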
if __name__ == "__main__":
import doctest
doctest.testmod()
| 55
| 0
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ = question_encoder
A__ = generator
A__ = self.question_encoder
def __snake_case ( self , UpperCamelCase__ ):
if os.path.isfile(UpperCamelCase__ ):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
A__ = os.path.join(UpperCamelCase__ , '''question_encoder_tokenizer''' )
A__ = os.path.join(UpperCamelCase__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(UpperCamelCase__ )
self.generator.save_pretrained(UpperCamelCase__ )
@classmethod
def __snake_case ( cls , UpperCamelCase__ , **UpperCamelCase__ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
A__ = kwargs.pop('''config''' , UpperCamelCase__ )
if config is None:
A__ = RagConfig.from_pretrained(UpperCamelCase__ )
A__ = AutoTokenizer.from_pretrained(
UpperCamelCase__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
A__ = AutoTokenizer.from_pretrained(
UpperCamelCase__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=UpperCamelCase__ , generator=UpperCamelCase__ )
def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ):
return self.current_tokenizer(*UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
return self.generator.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
return self.generator.decode(*UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self ):
A__ = self.question_encoder
def __snake_case ( self ):
A__ = self.generator
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "longest" , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ):
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , UpperCamelCase__ , )
if max_length is None:
A__ = self.current_tokenizer.model_max_length
A__ = self(
UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , max_length=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , **UpperCamelCase__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
A__ = self.current_tokenizer.model_max_length
A__ = self(
text_target=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , **UpperCamelCase__ , )
A__ = labels['''input_ids''']
return model_inputs
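# Hedged usage sketch (assuming the class above corresponds to RagTokenizer;
# the checkpoint name is illustrative):
#
# from transformers import RagTokenizer
#
# tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# batch = tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")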
| 705
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : int = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Tuple = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
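# Note: the _LazyModule assignment above is what defers the heavy
# torch/tf/flax imports until a module attribute is first accessed,
# keeping `import transformers` cheap when only one backend is installed.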
| 55
| 0
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
@add_end_docstrings(
SCREAMING_SNAKE_CASE_, R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ", )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
if self.framework == "tf":
A__ : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A__ : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCamelCase__ )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __snake_case ( self , UpperCamelCase__ ):
A__ : Dict = self.get_masked_index(UpperCamelCase__ )
A__ : str = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def __snake_case ( self , UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ):
if return_tensors is None:
A__ : str = self.framework
A__ : str = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.ensure_exactly_one_mask_token(UpperCamelCase__ )
return model_inputs
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = self.model(**UpperCamelCase__ )
A__ : List[str] = model_inputs['''input_ids''']
return model_outputs
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=5 , UpperCamelCase__=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
A__ : Union[str, Any] = target_ids.shape[0]
A__ : str = model_outputs['''input_ids'''][0]
A__ : Tuple = model_outputs['''logits''']
if self.framework == "tf":
A__ : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A__ : Dict = outputs.numpy()
A__ : Tuple = outputs[0, masked_index, :]
A__ : str = stable_softmax(UpperCamelCase__ , axis=-1 )
if target_ids is not None:
A__ : int = tf.gather_nd(tf.squeeze(UpperCamelCase__ , 0 ) , target_ids.reshape(-1 , 1 ) )
A__ : str = tf.expand_dims(UpperCamelCase__ , 0 )
A__ : str = tf.math.top_k(UpperCamelCase__ , k=UpperCamelCase__ )
            A__ , A__ : List[str] = topk.values.numpy(), topk.indices.numpy()
else:
A__ : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCamelCase__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A__ : List[Any] = outputs[0, masked_index, :]
A__ : Tuple = logits.softmax(dim=-1 )
if target_ids is not None:
A__ : str = probs[..., target_ids]
A__ : Tuple = probs.topk(UpperCamelCase__ )
A__ : Union[str, Any] = []
A__ : str = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
A__ : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
A__ : List[str] = input_ids.numpy().copy()
if target_ids is not None:
A__ : Any = target_ids[p].tolist()
A__ : List[str] = p
# Filter padding out:
A__ : Dict = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A__ : Any = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
A__ : str = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(UpperCamelCase__ )
result.append(UpperCamelCase__ )
if single_mask:
return result[0]
return result
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = [targets]
try:
A__ : Any = self.tokenizer.get_vocab()
except Exception:
A__ : Tuple = {}
A__ : str = []
for target in targets:
A__ : int = vocab.get(UpperCamelCase__ , UpperCamelCase__ )
if id_ is None:
A__ : Tuple = self.tokenizer(
UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , max_length=1 , truncation=UpperCamelCase__ , )['''input_ids''']
if len(UpperCamelCase__ ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
A__ : int = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
A__ : str = list(set(UpperCamelCase__ ) )
if len(UpperCamelCase__ ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
A__ : Optional[int] = np.array(UpperCamelCase__ )
return target_ids
def __snake_case ( self , UpperCamelCase__=None , UpperCamelCase__=None ):
A__ : Tuple = {}
if targets is not None:
A__ : Any = self.get_target_ids(UpperCamelCase__ , UpperCamelCase__ )
A__ : List[Any] = target_ids
if top_k is not None:
A__ : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ):
A__ : Any = super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) == 1:
return outputs[0]
return outputs
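# Hedged usage sketch (not part of this file; the class above mirrors the
# fill-mask pipeline, normally reached through `pipeline(...)`, and the
# checkpoint name is illustrative):
#
# from transformers import pipeline
#
# unmasker = pipeline("fill-mask", model="bert-base-uncased")
# unmasker("Paris is the [MASK] of France.", top_k=1)
# # -> [{"score": ..., "token": ..., "token_str": "capital", "sequence": ...}]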
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 55
| 0
|
def solution(n: int = 4_00_00_00) -> int:
    """Return the sum of all even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
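# Spot checks: the even Fibonacci terms not exceeding 10 are 0, 2 and 8,
# so solution(10) == 10; for the default 4_000_000 bound the sum is
# 4_613_732 (the familiar Project Euler #2 answer).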
if __name__ == "__main__":
print(f"""{solution() = }""")
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
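# Concrete CLI commands subclass this base: register_subcommand attaches an
# argparse subparser for the command, and run() executes it.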
| 55
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "megatron-bert"

    def __init__(self, vocab_size=2_9056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
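# Hedged instantiation sketch (defaults come straight from the signature above):
#
# config = MegatronBertConfig()  # 29056-token vocab, 24 layers, 16 heads
# assert config.hidden_size == 1024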
| 708
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 0
|
UNIVERSAL_GAS_CONSTANT = 8.31_4462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
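# Quick consistency check (SI units): pressure_of_gas_system(1, 300, 1)
# gives about 2494.34 Pa, and volume_of_gas_system(1, 300, 2494.34)
# returns roughly 1.0 m^3, as PV = nRT requires.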
if __name__ == "__main__":
from doctest import testmod
testmod()
| 709
|
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (fibonacci(2) == 1)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 10_00) -> int:
    """Return the index of the first Fibonacci term containing n digits."""
    return fibonacci_digits_index(n)
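# Spot checks: fibonacci(12) == 144 is the first three-digit term, so
# fibonacci_digits_index(3) == 12; for the default 1000-digit target the
# index is 4782 (the familiar Project Euler #25 answer).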
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55
| 0
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Optional[int] = torch.nn.Linear(2 , 4 )
A__ : int = torch.optim.AdamW(model.parameters() , lr=1.0 )
A__ : Any = torch.optim.lr_scheduler.OneCycleLR(__UpperCamelCase , max_lr=0.0_1 , steps_per_epoch=2 , epochs=1 )
A__ : Optional[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
A__ : Union[str, Any] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
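# The helper above wires a minimal training setup for the tests below: a
# 2->4 linear model, AdamW at lr=1.0, a one-cycle schedule spanning one
# two-step epoch, and two tiny dataloaders of three scalar samples each.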
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
A__ : Union[str, Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(__UpperCamelCase )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@require_cuda
def __snake_case ( self ):
A__ : Any = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(UpperCamelCase__ ):
A__ : Dict = Accelerator(cpu=UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[int] = Accelerator()
A__ : str = GradientState()
assert state.num_steps == 1
A__ : Optional[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
A__ : int = False
assert state.sync_gradients is False
GradientState._reset_state()
def __snake_case ( self ):
A__ : Optional[int] = Accelerator()
A__ : Dict = create_components()
        (prepared_model, prepared_optimizer, prepared_scheduler, prepared_train_dl, prepared_valid_dl) = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __snake_case ( self ):
A__ : Dict = Accelerator()
A__ : str = create_components()
accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __snake_case ( self ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*UpperCamelCase__ , **UpperCamelCase__ ):
pass
with patch('''torch.cuda.set_device''' , UpperCamelCase__ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
A__ : Optional[int] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def __snake_case ( self ):
A__ : Tuple = Accelerator()
A__ : Any = create_components()
accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ : Optional[Any] = get_signature(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase__ )
# make sure random weights don't match
load_random_weights(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 )
def __snake_case ( self ):
A__ : List[Any] = Accelerator()
A__ : Optional[int] = create_components()
accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ : Union[str, Any] = get_signature(UpperCamelCase__ )
# saving hook
def save_config(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(UpperCamelCase__ , '''data.json''' ) , '''w''' ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# loading hook
def load_config(UpperCamelCase__ , UpperCamelCase__ ):
with open(os.path.join(UpperCamelCase__ , '''data.json''' ) , '''r''' ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
A__ : List[Any] = config['''class_name''']
A__ : Dict = accelerator.register_save_state_pre_hook(UpperCamelCase__ )
A__ : Optional[int] = accelerator.register_load_state_pre_hook(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase__ )
# make sure random weights don't match with hooks
load_random_weights(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
A__ : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 )
            # model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase__ )
# make sure random weights don't match with hooks removed
load_random_weights(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
A__ : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 )
            # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __snake_case ( self ):
A__ : int = Accelerator()
A__ : Tuple = create_components()
A__ : List[Any] = None
# This should work
A__ : Tuple = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.assertTrue(dummy_obj is None )
def __snake_case ( self ):
A__ : str = Accelerator()
A__ : int = create_components()
A__ : Any = [1, 2, 3]
# This should work
A__ : List[Any] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(
getattr(UpperCamelCase__ , '''_is_accelerate_prepared''' , UpperCamelCase__ ) , UpperCamelCase__ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(UpperCamelCase__ , '''_is_accelerate_prepared''' , UpperCamelCase__ ) , UpperCamelCase__ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(UpperCamelCase__ , '''_is_accelerate_prepared''' , UpperCamelCase__ ) , UpperCamelCase__ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(UpperCamelCase__ , '''_is_accelerate_prepared''' , UpperCamelCase__ ) , UpperCamelCase__ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(UpperCamelCase__ , '''_is_accelerate_prepared''' , UpperCamelCase__ ) , UpperCamelCase__ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(UpperCamelCase__ , '''_is_accelerate_prepared''' , UpperCamelCase__ ) , UpperCamelCase__ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def __snake_case ( self ):
from transformers import AutoModelForCausalLM
A__ : Tuple = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=UpperCamelCase__ , device_map={'''''': 0} , )
A__ : Optional[int] = Accelerator()
# This should work
A__ : Optional[Any] = accelerator.prepare(UpperCamelCase__ )
@slow
@require_bnb
def __snake_case ( self ):
from transformers import AutoModelForCausalLM
A__ : List[str] = Accelerator()
with init_empty_weights():
A__ : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
A__ : Any = infer_auto_device_map(UpperCamelCase__ )
A__ : Any = '''cpu'''
A__ : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , llm_inta_enable_fpaa_cpu_offload=UpperCamelCase__ )
# This should not work and get value error
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = accelerator.prepare(UpperCamelCase__ )
    @slow
    @require_bnb
    @require_multi_gpu
    def __snake_case ( self ):
        from transformers import AutoModelForCausalLM
        PartialState._shared_state = {'''distributed_type''': DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''' , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map['''lm_head'''] = 1
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def __snake_case ( self ):
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''' , )
        device_map = infer_auto_device_map(model )
        device_map['''lm_head'''] = 1
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
    @require_cuda
    def __snake_case ( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.01 )
        accelerator = Accelerator(cpu=True )
        model = accelerator.prepare(model )
| 710
|
ks = range(2, 2_0 + 1)
base = [1_0**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i , k , i , n ):
    """simple docstring"""
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c, a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute(a_i , k , i , n ):
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add(digits , k , addend ):
    """simple docstring"""
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 10 )
        digits.append(digit )
def solution(n: int = 10**15 ) -> int:
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
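# Brute-force cross-check (a hedged sketch, not part of the original solution):
# the sequence accelerated above is a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)),
# so for small n the memoized jumps can be verified against:
#
#     def naive_solution(n: int) -> int:
#         a = 1
#         for _ in range(n - 1):
#             a += sum(int(digit) for digit in str(a))
#         return a
#
# The naive loop is far too slow for the default n = 10**15, which is what the
# digit-array jump caching above is for.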
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
| 0
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act='''gelu''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
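        # Worked example with the defaults above: image_size=4 and patch_size=2 give
        # image_seq_length = (4 // 2) ** 2 + 1 = 5, so with text_seq_length=7 the
        # full seq_length is 7 + 5 = 12.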
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
    def setUp( self ):
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['''labels'''] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['''start_positions'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['''end_positions'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
    @slow
    def test_inference_no_head( self ):
        model = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key , default=False ):
    """simple docstring"""
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no." )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
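# Example: running `RUN_SLOW=yes python -m pytest tests/` flips `_run_slow_tests`
# to True, which is the flag the `slow` decorator below consults before
# un-skipping a test.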
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def slow(test_case ):
    """simple docstring"""
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(test_case )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def require_cuda(test_case ):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(test_case )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def require_bnb(test_case ):
    """simple docstring"""
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(test_case )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def require_multi_gpu(test_case ):
    """simple docstring"""
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(test_case )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def require_torch_min_version(test_case=None , version=None ):
    """simple docstring"""
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , F"test requires torch version >= {version}" )(test_case )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class TempDirTestCase(unittest.TestCase ):
    '''simple docstring'''
    clear_on_setup = True
    @classmethod
    def setUpClass( cls ):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass( cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp( self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('''**/*''' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    '''simple docstring'''
    def add_mocks( self , mocks ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor ):
    """simple docstring"""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
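# Illustrative note (an assumption about the distributed setup, not from this file):
# with 2 processes, `gather` on a per-process tensor of shape (1, N) yields a
# (2, N) tensor on every process, so the loop above checks that each process
# contributed an identical copy of the same tensor.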
class _RunOutput:
    '''simple docstring'''
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    """simple docstring"""
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label='''''' ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=1_80 , quiet=False , echo=True ) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
class SubprocessCallException(Exception ):
    '''simple docstring'''
    pass
def run_command(command , return_stdout=False ):
    """simple docstring"""
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 0
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 712
|
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    '''simple docstring'''
    def __init__( self ):
        self.SQUARE = np.array(SQUARE )
    def letter_to_numbers( self , letter ):
        index1, index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes
    def numbers_to_letter( self , index1 , index2 ):
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode( self , message ):
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        message = message.replace('''j''' , '''i''' )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter
        return encoded_message
    def decode( self , message ):
        message = message.lower()
        message = message.replace(''' ''' , '''''' )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ''''''
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter
        return decoded_message
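# Usage sketch (a hedged example, not from the original file; the 5x5 Polybius
# square above has no '''j''', so encode() maps j -> i and strips spaces):
#
#     cipher = BifidCipher()
#     ciphertext = cipher.encode('''testmessage''')
#     assert cipher.decode(ciphertext) == '''testmessage'''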
| 55
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase ):
    '''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        qformer_tokenizer = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def get_qformer_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
A__ : List[str] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A__ : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A__ : Dict = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
A__ : Tuple = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ )
    def test_image_processor( self ):
A__ : Dict = self.get_image_processor()
A__ : Any = self.get_tokenizer()
A__ : Optional[int] = self.get_qformer_tokenizer()
A__ : List[str] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : List[Any] = self.prepare_image_inputs()
A__ : List[Any] = image_processor(UpperCamelCase__ , return_tensors='''np''' )
A__ : List[str] = processor(images=UpperCamelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
A__ : Optional[int] = self.get_image_processor()
A__ : str = self.get_tokenizer()
A__ : List[Any] = self.get_qformer_tokenizer()
A__ : Union[str, Any] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : List[Any] = '''lower newer'''
A__ : Union[str, Any] = processor(text=UpperCamelCase__ )
A__ : List[Any] = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
A__ : Dict = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
    def test_processor( self ):
A__ : List[str] = self.get_image_processor()
A__ : Optional[Any] = self.get_tokenizer()
A__ : List[Any] = self.get_qformer_tokenizer()
A__ : int = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : Optional[int] = '''lower newer'''
A__ : Optional[int] = self.prepare_image_inputs()
A__ : Any = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
    def test_tokenizer_decode( self ):
A__ : List[str] = self.get_image_processor()
A__ : Tuple = self.get_tokenizer()
A__ : str = self.get_qformer_tokenizer()
A__ : Any = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ : Optional[int] = processor.batch_decode(UpperCamelCase__ )
A__ : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
    def test_model_input_names( self ):
A__ : Optional[int] = self.get_image_processor()
A__ : Optional[int] = self.get_tokenizer()
A__ : str = self.get_qformer_tokenizer()
A__ : Optional[int] = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
A__ : Optional[Any] = '''lower newer'''
A__ : Any = self.prepare_image_inputs()
A__ : Dict = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 713
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 0
|
import functools
def min_distance_up_bottom(word1: str , word2: str ) -> int:
    """simple docstring"""
    len_word1 = len(word1 )
    len_word2 = len(word2 )
    @functools.cache
    def min_distance(index1: int , index2: int ) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2] )  # current letters not identical
        return min(
            1 + min_distance(index1 + 1 , index2 ) , 1 + min_distance(index1 , index2 + 1 ) , diff + min_distance(index1 + 1 , index2 + 1 ) , )
    return min_distance(0 , 0 )
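# Usage sketch (a hedged example, not part of the original file):
#     min_distance_up_bottom('''kitten''', '''sitting''')  # -> 3
# (substitute k -> s, substitute e -> i, insert g)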
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
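# A usage sketch (an assumption about how this example is typically run, not part
# of the original script): after describing the machine once with
# `accelerate config`, the script is launched through the CLI, e.g.
#
#     accelerate launch cross_validation.py --mixed_precision fp16
#
# where `cross_validation.py` is a placeholder file name; only `--mixed_precision`
# and `--cpu` are flags actually defined by the argument parser below.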
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    """simple docstring"""
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather((predictions, batch['''labels''']) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , eval_metric )
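# A minimal sketch of the `gather_for_metrics` alternative mentioned in the
# comments above (hedged; it is not what this script executes):
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#     metric.add_batch(predictions=predictions, references=references)
#
# `gather_for_metrics` drops the duplicate samples that `gather` pads onto the
# last distributed batch, making the manual `samples_seen` bookkeeping unnecessary.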
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 55
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2_0_4_8,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
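# Usage sketch (a hedged example, not from the original file; the checkpoint name
# is taken from the pretrained map above):
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained('''EleutherAI/gpt-neox-20b''')
#     tokenizer('''Hello world''')['''input_ids''']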
| 715
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool ):
    '''simple docstring'''
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = '''microsoft/speecht5_hifigan'''
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors='''pt''' , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
            embeddings_dataset = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
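# Usage sketch (an assumption based on the PipelineTool API, not part of this file):
#     tool = TextToSpeechTool()
#     audio = tool('''Hello, world!''')  # PipelineTool.__call__ runs setup, encode, forward, decode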
| 55
| 0
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
_SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(os.path.abspath(__file__))
_SCREAMING_SNAKE_CASE : List[str] = os.path.join(os.path.expanduser('~'), '.cache')
_SCREAMING_SNAKE_CASE : Tuple = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any]=False ) -> Optional[Any]:
"""simple docstring"""
A__ : Optional[Any] = model_type
if use_small:
key += "_small"
return os.path.join(__UpperCamelCase , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
hf_hub_download(repo_id=__UpperCamelCase , filename=__UpperCamelCase , local_dir=__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : int=False , __UpperCamelCase : Optional[int]="text" ) -> Union[str, Any]:
"""simple docstring"""
if model_type == "text":
A__ : Optional[int] = BarkSemanticModel
A__ : str = BarkSemanticConfig
A__ : int = BarkSemanticGenerationConfig
elif model_type == "coarse":
A__ : Optional[int] = BarkCoarseModel
A__ : Tuple = BarkCoarseConfig
A__ : Optional[int] = BarkCoarseGenerationConfig
elif model_type == "fine":
A__ : str = BarkFineModel
A__ : List[Any] = BarkFineConfig
A__ : Optional[Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
A__ : Optional[Any] = F"{model_type}_small" if use_small else model_type
A__ : str = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__UpperCamelCase ):
logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
A__ : int = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )
# this is a hack
A__ : Union[str, Any] = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
A__ : Any = model_args['''vocab_size''']
A__ : int = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
A__ : Optional[Any] = model_args.pop('''n_head''' )
A__ : Tuple = model_args.pop('''n_embd''' )
A__ : Tuple = model_args.pop('''n_layer''' )
A__ : Any = ConfigClass(**checkpoint['''model_args'''] )
A__ : Tuple = ModelClass(config=__UpperCamelCase )
A__ : Any = GenerationConfigClass()
A__ : Optional[int] = model_generation_config
A__ : Union[str, Any] = checkpoint['''model''']
# fixup checkpoint
A__ : List[str] = '''_orig_mod.'''
for k, v in list(state_dict.items() ):
if k.startswith(__UpperCamelCase ):
# replace part of the key with corresponding layer name in HF implementation
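# e.g. (illustrative, following new_layer_name_dict above): "_orig_mod.transformer.h.0.ln_1.weight" becomes "layers.0.layernorm_1.weight"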
A__ : List[Any] = k[len(__UpperCamelCase ) :]
for old_layer_name in new_layer_name_dict:
A__ : List[Any] = new_k.replace(__UpperCamelCase , new_layer_name_dict[old_layer_name] )
A__ : Optional[int] = state_dict.pop(__UpperCamelCase )
A__ : List[str] = set(state_dict.keys() ) - set(model.state_dict().keys() )
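# keys ending in ".attn.bias" are presumably the GPT-style causal-mask buffers rather than learned parameters, so mismatches on them are tolerated below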
A__ : Optional[int] = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
A__ : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() )
A__ : Any = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(__UpperCamelCase ) != 0:
raise ValueError(F"extra keys found: {extra_keys}" )
if len(__UpperCamelCase ) != 0:
raise ValueError(F"missing keys: {missing_keys}" )
model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
A__ : Optional[Any] = model.num_parameters(exclude_embeddings=__UpperCamelCase )
A__ : str = checkpoint['''best_val_loss'''].item()
logger.info(F"model loaded: {round(n_params/1e6 , 1 )}M params, {round(__UpperCamelCase , 3 )} loss" )
model.eval()
model.to(__UpperCamelCase )
del checkpoint, state_dict
return model
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Tuple="text" ) -> int:
"""simple docstring"""
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
A__ : str = '''cpu''' # do conversion on cpu
A__ : int = _get_ckpt_path(__UpperCamelCase , use_small=__UpperCamelCase )
A__ : Optional[int] = _load_model(__UpperCamelCase , __UpperCamelCase , model_type=__UpperCamelCase , use_small=__UpperCamelCase )
# load bark initial model
A__ : List[Any] = _bark_load_model(__UpperCamelCase , '''cpu''' , model_type=__UpperCamelCase , use_small=__UpperCamelCase )
if model_type == "text":
A__ : Optional[int] = bark_model['''model''']
if model.num_parameters(exclude_embeddings=__UpperCamelCase ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
A__ : Optional[Any] = 5
A__ : Dict = 10
if model_type in ["text", "coarse"]:
A__ : Tuple = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
A__ : Tuple = bark_model(__UpperCamelCase )[0]
A__ : int = model(__UpperCamelCase )
# take last logits
A__ : int = output_new_model_total.logits[:, [-1], :]
else:
A__ : str = 3
A__ : List[str] = 8
A__ : List[Any] = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
A__ : List[Any] = model(__UpperCamelCase , __UpperCamelCase )
A__ : Union[str, Any] = bark_model(__UpperCamelCase , __UpperCamelCase )
A__ : Optional[Any] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , ) -> Dict:
"""simple docstring"""
A__ : int = os.path.join(__UpperCamelCase , __UpperCamelCase )
A__ : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(__UpperCamelCase , '''config.json''' ) )
A__ : Optional[Any] = BarkCoarseConfig.from_pretrained(os.path.join(__UpperCamelCase , '''config.json''' ) )
A__ : str = BarkFineConfig.from_pretrained(os.path.join(__UpperCamelCase , '''config.json''' ) )
A__ : int = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
A__ : Dict = BarkSemanticModel.from_pretrained(__UpperCamelCase )
A__ : str = BarkCoarseModel.from_pretrained(__UpperCamelCase )
A__ : Optional[int] = BarkFineModel.from_pretrained(__UpperCamelCase )
A__ : int = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
A__ : str = BarkConfig.from_sub_model_configs(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ : Any = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
A__ : List[Any] = BarkModel(__UpperCamelCase )
A__ : List[Any] = semantic
A__ : Tuple = coarseAcoustic
A__ : Union[str, Any] = fineAcoustic
A__ : Any = codec
A__ : Union[str, Any] = bark_generation_config
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
bark.save_pretrained(__UpperCamelCase , repo_id=__UpperCamelCase , push_to_hub=__UpperCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 716
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
| 55
| 0
|
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str , **__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A__ : int = AutoConfig.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
A__ : int = AutoModelForSeqaSeqLM.from_config(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
AutoTokenizer.from_pretrained(__UpperCamelCase ).save_pretrained(__UpperCamelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 717
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
| 55
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
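# A minimal sketch of the LocalSGD pattern this script demonstrates (it mirrors
# the calls used in the training loop further below; `local_sgd_steps` controls
# how often parameters are synchronized across workers):
#
#     with LocalSGD(accelerator=accelerator, model=model,
#                   local_sgd_steps=8, enabled=True) as local_sgd:
#         for batch in dataloader:
#             ...  # forward, backward, optimizer.step()
#             local_sgd.step()  # counts steps; syncs parameters every 8th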
_SCREAMING_SNAKE_CASE : Tuple = 1_6
_SCREAMING_SNAKE_CASE : Optional[Any] = 3_2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Optional[Any]:
"""simple docstring"""
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ : Optional[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
A__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ : str = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ : Union[str, Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
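# e.g. with pad_to_multiple_of=8, a batch whose longest sequence has 61 tokens is padded up to 64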
if accelerator.mixed_precision == "fp8":
A__ : Union[str, Any] = 16
elif accelerator.mixed_precision != "no":
A__ : Dict = 8
else:
A__ : List[Any] = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ : Any = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Optional[int] = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] ) -> int:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
A__ : Tuple = 2
# New Code #
A__ : Optional[int] = int(args.gradient_accumulation_steps )
A__ : List[Any] = int(args.local_sgd_steps )
# Initialize accelerator
A__ : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCamelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config['''lr''']
A__ : List[Any] = int(config['''num_epochs'''] )
A__ : int = int(config['''seed'''] )
A__ : List[Any] = int(config['''batch_size'''] )
A__ : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
set_seed(__UpperCamelCase )
A__ : List[Any] = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Dict = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
A__ : Dict = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
A__ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
with LocalSGD(
accelerator=__UpperCamelCase , model=__UpperCamelCase , local_sgd_steps=__UpperCamelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__UpperCamelCase ):
A__ : Tuple = model(**__UpperCamelCase )
A__ : str = output.loss
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ : str = model(**__UpperCamelCase )
A__ : Dict = outputs.logits.argmax(dim=-1 )
A__ : int = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
A__ : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
A__ : List[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. '''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__UpperCamelCase , default=1 , help='''The number of minibatches to be run before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=__UpperCamelCase , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ : Optional[int] = parser.parse_args()
A__ : Optional[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 718
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = 0
A__ : Optional[Any] = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ : Optional[int] = len(__UpperCamelCase ) // 2
A__ : List[str] = arr[0:mid]
A__ : Union[str, Any] = arr[mid:]
A__ , A__ : List[Any] = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
A__ : str = []
A__ : Tuple = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
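# Illustration (not in the original): with P = [5, 7] and Q = [3, 9], the
# comparison 5 > 3 lets us count len(P) - i = 2 inversions at once,
# since the sortedness of P guarantees 7 > 3 as well.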
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A__ : int = count_inversions_bf(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A__ : Optional[Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Dict = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
# an empty list should also have zero inversions
A__ : Union[str, Any] = []
A__ : Union[str, Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Any = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
if __name__ == "__main__":
main()
| 55
| 0
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , ) -> str:
"""simple docstring"""
A__ : Union[str, Any] = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
A__ : List[Any] = input_paths_and_base_extractors[compression_format]
if input_path is None:
A__ : Tuple = F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__UpperCamelCase )
assert base_extractor.is_extractable(__UpperCamelCase )
A__ : List[str] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(__UpperCamelCase , __UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
A__ : List[str] = file_path.read_text(encoding='''utf-8''' )
else:
A__ : Any = output_path.read_text(encoding='''utf-8''' )
A__ : Any = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , ) -> List[str]:
"""simple docstring"""
A__ : List[Any] = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
A__ : Any = input_paths[compression_format]
if input_path is None:
A__ : Optional[Any] = F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__UpperCamelCase )
A__ : Dict = Extractor.infer_extractor_format(__UpperCamelCase )
assert extractor_format is not None
A__ : Optional[Any] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
A__ : Optional[int] = file_path.read_text(encoding='''utf-8''' )
else:
A__ : Union[str, Any] = output_path.read_text(encoding='''utf-8''' )
A__ : Any = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : List[str] ) -> Tuple:
"""simple docstring"""
import tarfile
A__ : Any = tmp_path / '''data_dot_dot'''
directory.mkdir()
A__ : Optional[Any] = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(__UpperCamelCase , '''w''' ) as f:
f.add(__UpperCamelCase , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
import tarfile
A__ : int = tmp_path / '''data_sym_link'''
directory.mkdir()
A__ : List[Any] = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=__UpperCamelCase )
with tarfile.TarFile(__UpperCamelCase , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
A__ : int = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
A__ : str = insecure_tar_files[insecure_tar_file]
A__ : Dict = tmp_path / '''extracted'''
TarExtractor.extract(__UpperCamelCase , __UpperCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> List[str]:
"""simple docstring"""
A__ : Any = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
A__ : int = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
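# the payload above deliberately embeds the "PK\x05\x06" end-of-central-directory signature, which is all zipfile.is_zipfile looks for -- hence the false positive asserted below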
with not_a_zip_file.open('''wb''' ) as f:
f.write(__UpperCamelCase )
assert zipfile.is_zipfile(str(__UpperCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__UpperCamelCase ) # but we're right
| 719
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (c - 1_28)
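# algebraically, 128 + level + (c - 128) == c + level: each pixel value is simply shifted by `level` (PIL clips the result to the 0-255 range of 8-bit images)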
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 55
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Tuple = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_SCREAMING_SNAKE_CASE : Any = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
A__ : int = torch.load(__UpperCamelCase , map_location='''cpu''' )
return sd
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=rename_keys_prefix ) -> int:
"""simple docstring"""
A__ : Dict = OrderedDict()
A__ : Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
A__ : Optional[int] = key
for name_pair in rename_keys_prefix:
A__ : Dict = new_key.replace(name_pair[0] , name_pair[1] )
A__ : Union[str, Any] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
A__ : str = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[str] ) -> str:
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
A__ : str = '''pretraining'''
if "vcr" in checkpoint_path:
A__ : int = {'''visual_embedding_dim''': 5_12}
elif "vqa_advanced" in checkpoint_path:
A__ : List[str] = {'''visual_embedding_dim''': 20_48}
elif "vqa" in checkpoint_path:
A__ : Optional[Any] = {'''visual_embedding_dim''': 20_48}
elif "nlvr" in checkpoint_path:
A__ : Any = {'''visual_embedding_dim''': 10_24}
else:
raise NotImplementedError(F"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
A__ : int = {'''visual_embedding_dim''': 5_12}
A__ : str = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
A__ : Union[str, Any] = {'''visual_embedding_dim''': 20_48}
A__ : List[Any] = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
A__ : Tuple = {'''visual_embedding_dim''': 20_48, '''num_labels''': 31_29}
A__ : Optional[int] = '''vqa'''
elif "nlvr" in checkpoint_path:
A__ : Dict = {
'''visual_embedding_dim''': 10_24,
'''num_labels''': 2,
}
A__ : Optional[int] = '''nlvr'''
A__ : str = VisualBertConfig(**__UpperCamelCase )
# Load State Dict
A__ : int = load_state_dict(__UpperCamelCase )
A__ : Optional[Any] = get_new_dict(__UpperCamelCase , __UpperCamelCase )
if model_type == "pretraining":
A__ : Optional[int] = VisualBertForPreTraining(__UpperCamelCase )
elif model_type == "vqa":
A__ : Union[str, Any] = VisualBertForQuestionAnswering(__UpperCamelCase )
elif model_type == "nlvr":
A__ : Tuple = VisualBertForVisualReasoning(__UpperCamelCase )
elif model_type == "multichoice":
A__ : List[str] = VisualBertForMultipleChoice(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Save Checkpoints
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
_SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 720
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 55
| 0
|
'''simple docstring'''
import qiskit
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> qiskit.result.counts.Counts:
"""simple docstring"""
A__ : str = qiskit.Aer.get_backend('''aer_simulator''' )
# Create a Quantum Circuit acting on the q register
A__ : int = qiskit.QuantumCircuit(__UpperCamelCase , __UpperCamelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
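# with both qubits flipped from |0> to |1>, every shot measures the state '11', so the returned counts should be {'11': 1000}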
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
A__ : List[str] = qiskit.execute(__UpperCamelCase , __UpperCamelCase , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(__UpperCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 721
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n                                    as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n                                    Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...                     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...                     ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...                     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...                     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...                     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...                     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...                     ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
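# transpose the references: a list of per-prediction reference lists becomes one stream per reference index, the layout sacrebleu's corpus_score expects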
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 0
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
A__ : Any = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
A__ : Dict = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
A__ : Tuple = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
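# Worked example (illustrative): for inputs 25 and 32, bin() yields "11001" and
# "100000"; zero-filled to width 6 and AND-ed position by position this gives "0b000000".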
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
F" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 55
| 0
|
import os
import sys
import unittest
_SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_SCREAMING_SNAKE_CASE : Dict = os.path.join(git_repo_path, 'src', 'transformers')
_SCREAMING_SNAKE_CASE : Dict = '\n{0} = None\n'
_SCREAMING_SNAKE_CASE : Dict = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
_SCREAMING_SNAKE_CASE : int = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class UpperCamelCase__ ( unittest.TestCase ):
def __snake_case ( self ):
A__ : List[str] = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(UpperCamelCase__ )
A__ : Tuple = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(UpperCamelCase__ , '''tokenizers''' )
A__ : Tuple = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(UpperCamelCase__ , '''tensorflow_text''' )
A__ : Any = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers''' )
A__ : int = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tensorflow_text''' )
A__ : List[Any] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(UpperCamelCase__ , '''sentencepiece_and_tokenizers_and_vision''' )
def __snake_case ( self ):
A__ : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , UpperCamelCase__ )
self.assertIn('''tensorflow_text''' , UpperCamelCase__ )
self.assertIn('''sentencepiece_and_tokenizers''' , UpperCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def __snake_case ( self ):
A__ : str = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , '''\nCONSTANT = None\n''' )
A__ : int = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
UpperCamelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
        A__ : Optional[Any] = '''
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'

    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
'''
A__ : Union[str, Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
        A__ : Tuple = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
A__ : Any = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , UpperCamelCase__ )
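# A short usage sketch (added; not part of the original test file): `find_backend`
# recognizes the guarded-import pattern and `create_dummy_object` renders the
# placeholder that stands in for the real object when that backend is missing.
if __name__ == "__main__":
    print(find_backend(''' if not is_torch_available():''' ) )  # -> torch
    print(create_dummy_object('''FakeClass''' , '''["torch"]''' ) )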
| 701
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    '''
    Iterable dataset that tokenizes a stream of texts and yields fixed-length
    chunks of `seq_length` token ids, separated by the BOS token.
    '''

    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            # fill a character buffer large enough to cover the requested sequences
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
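# A minimal usage sketch (added; '''gpt2''' is an arbitrary example checkpoint, not
# one mandated by this script): stream a small in-memory dataset through
# ConstantLengthDataset and inspect one fixed-length chunk.
#
#   from transformers import AutoTokenizer
#   toy_tokenizer = AutoTokenizer.from_pretrained('''gpt2''' )
#   toy_data = iter([{'''content''': '''def add(a, b):\n    return a + b\n''' * 50}] )
#   toy_dataset = ConstantLengthDataset(toy_tokenizer , toy_data , seq_length=128 , num_of_sequences=1 )
#   first_chunk = next(iter(toy_dataset ) )
#   assert first_chunk.shape == (128,)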
def create_dataloader( args ):
    """Build the evaluation dataloader over a streaming dataset."""
    ds_kwargs = {'''streaming''': True}
    valid_data = load_dataset(args.dataset_name , split='''train''' , **ds_kwargs )
    # `tokenizer` is the module-level tokenizer loaded below
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate( args ):
    """Compute mean loss and perplexity over the evaluation dataloader."""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('''inf''' )
    return loss.item(), perplexity.item()
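# Note (added): with `labels=batch` the model returns the mean token-level
# cross-entropy, so the perplexity above is simply exp(mean loss); e.g.
# torch.exp(torch.tensor(2.0 ) ) ≈ 7.39.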
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
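# How this works (added note): `_LazyModule` defers the actual submodule imports
# until an attribute is first accessed, so importing the package stays cheap; the
# eager imports under TYPE_CHECKING above exist only so static type checkers see
# the real symbols. For example:
#
#   from transformers.models.sew import SEWConfig  # imports configuration_sew only
#   config = SEWConfig()                           # modeling_sew is still not imported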
| 702
|
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
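def solution_mod() -> str:
    """An alternative sketch (added): keep only the last ten digits throughout by
    summing modulo 10**10 and using three-argument pow for modular exponentiation."""
    modulus = 10**10
    return str(sum(pow(i , i , modulus ) for i in range(1 , 1001 ) ) % modulus ).zfill(10 )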
if __name__ == "__main__":
print(solution())
| 55
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class QuestionAnsweringExtractive( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string" ),
                    "answer_start": Value("int32" ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
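# A usage sketch (added; the alternate column names are illustrative): a dataset
# whose QA columns are named differently declares the mapping onto the canonical
# schema by instantiating the template with those names.
#
#   template = QuestionAnsweringExtractive(question_column='''query''' , context_column='''passage''' )
#   template.column_mapping  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}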
| 703
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
        cmd = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 55
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "ibert"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act='''gelu''' , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type='''absolute''' , quant_mode=False , force_dequant='''none''' , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
if self.task == "multiple-choice":
A__ : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A__ : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
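# A usage sketch (added; constructing the ONNX config directly is illustrative):
# the `inputs` property above hands the exporter per-tensor dynamic axes so batch
# and sequence dimensions stay symbolic in the exported graph.
#
#   onnx_config = IBertOnnxConfig(IBertConfig() , task='''multiple-choice''' )
#   onnx_config.inputs
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'choice', 2: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'choice', 2: 'sequence'})])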
| 704
|
from numpy import exp, pi, sqrt
def gaussian( x , mu: float = 0.0 , sigma: float = 1.0 ) -> float:
    """Evaluate the normal probability density with mean `mu` and std `sigma` at `x`."""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
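# A quick numeric sanity check (added): the density should integrate to ~1; a
# coarse Riemann sum over [-5, 5] makes that visible.
if __name__ == "__main__":
    step = 0.001
    total_mass = sum(gaussian(-5 + step * i ) for i in range(int(10 / step ) ) ) * step
    assert abs(total_mass - 1.0 ) < 1e-3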
| 55
| 0
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata( class_info_file , repo_path='''shi-labs/oneformer_demo''' ):
    """simple docstring"""
    with open(hf_hub_download(repo_path , class_info_file , repo_type='''dataset''' ) , '''r''' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['''name''']
        class_names.append(info['''name'''] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata['''thing_ids'''] = thing_ids
    metadata['''class_names'''] = class_names
    return metadata
class OneFormerImageProcessorTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , size=None , do_resize=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , num_labels=10 , do_reduce_labels=False , ignore_index=255 , repo_path='''shi-labs/oneformer_demo''' , class_info_file='''ade20k_panoptic.json''' , num_text=10 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'''shortest_edge''': 32, '''longest_edge''': 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file , repo_path )
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item: item[0] )[0]
            expected_width = max(expected_values , key=lambda item: item[1] )[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs( self ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp( self ):
        self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
return self.image_processing_tester.prepare_image_processor_dict()
def __snake_case ( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
        self.assertTrue(hasattr(image_processor , '''ignore_index''' ) )
        self.assertTrue(hasattr(image_processor , '''class_info_file''' ) )
        self.assertTrue(hasattr(image_processor , '''num_text''' ) )
        self.assertTrue(hasattr(image_processor , '''repo_path''' ) )
        self.assertTrue(hasattr(image_processor , '''metadata''' ) )
        self.assertTrue(hasattr(image_processor , '''do_reduce_labels''' ) )
def __snake_case ( self ):
pass
    def __snake_case ( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )

        encoded_images = image_processor(
            image_inputs , ['''semantic'''] * len(image_inputs ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def __snake_case ( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )

        encoded_images = image_processor(
            image_inputs , ['''semantic'''] * len(image_inputs ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def __snake_case ( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )

        encoded_images = image_processor(
            image_inputs , ['''semantic'''] * len(image_inputs ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def comm_get_image_processor_inputs( self , with_segmentation_maps=False , is_instance_map=False , segmentation_type='''np''' ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]

        inputs = image_processor(
            image_inputs , ['''semantic'''] * len(image_inputs ) , annotations , return_tensors='''pt''' , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )

        return inputs
def __snake_case ( self ):
pass
def __snake_case ( self ):
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )

            mask_labels = inputs['''mask_labels''']
            class_labels = inputs['''class_labels''']
            pixel_values = inputs['''pixel_values''']
            text_inputs = inputs['''text_inputs''']

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )

        common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type='''pil''' )
        common(is_instance_map=True , segmentation_type='''pil''' )
def __snake_case ( self ):
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, :15] = 1  # second row
        fake_binary_mask[5, :10] = 1  # fifth row

        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
def __snake_case ( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs )

        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def __snake_case ( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )

        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def __snake_case ( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )

        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , list )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
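# A plain-Python reference sketch (added; a restatement for clarity, not the
# library implementation): the RLE format checked above encodes each run of 1s in
# the flattened mask as a (1-indexed start, length) pair, so the mask built in
# the binary-mask test yields [21, 45, 251, 10].
def binary_mask_to_rle_reference(mask ):
    pixels = np.concatenate([[0], np.asarray(mask ).flatten() , [0]] )
    changes = np.where(pixels[1:] != pixels[:-1] )[0] + 1
    changes[1::2] -= changes[::2]
    return list(changes )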
| 705
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 55
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )

        processor.save_pretrained(self.tmpdirname )
    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __snake_case ( self ):
        processor = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )

        processor = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SamImageProcessor )
def __snake_case ( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )

        input_image = self.prepare_image_inputs()

        input_feat_extract = image_processor(input_image , return_tensors='''np''' )
        input_processor = processor(images=input_image , return_tensors='''np''' )

        input_feat_extract.pop('''original_sizes''' )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def __snake_case ( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = [torch.ones((1, 3, 5, 5) )]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks , original_sizes , reshaped_input_size )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )

        masks = processor.post_process_masks(
            dummy_masks , torch.tensor(original_sizes ) , torch.tensor(reshaped_input_size ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5) )]
        masks = processor.post_process_masks(dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError ):
            masks = processor.post_process_masks(dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) )
@require_vision
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )

        processor.save_pretrained(self.tmpdirname )
    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __snake_case ( self ):
        processor = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )

        processor = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SamImageProcessor )
def __snake_case ( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )

        input_image = self.prepare_image_inputs()

        input_feat_extract = image_processor(input_image , return_tensors='''np''' )
        input_processor = processor(images=input_image , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def __snake_case ( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = [tf.ones((1, 3, 5, 5) )]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks , original_sizes , reshaped_input_size , return_tensors='''tf''' )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )

        masks = processor.post_process_masks(
            dummy_masks , tf.convert_to_tensor(original_sizes ) , tf.convert_to_tensor(reshaped_input_size ) , return_tensors='''tf''' , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5) )]
        masks = processor.post_process_masks(
            dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) , return_tensors='''tf''' )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            masks = processor.post_process_masks(
                dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )

        processor.save_pretrained(self.tmpdirname )
    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
def __snake_case ( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )

        dummy_masks = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )

        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks )]
        pt_dummy_masks = [torch.tensor(dummy_masks )]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks , original_sizes , reshaped_input_size , return_tensors='''tf''' )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks , original_sizes , reshaped_input_size , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __snake_case ( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input , return_tensors='''pt''' )['''pixel_values'''].numpy()
        pt_input_processor = processor(images=image_input , return_tensors='''pt''' )['''pixel_values'''].numpy()

        tf_input_feat_extract = image_processor(image_input , return_tensors='''tf''' )['''pixel_values'''].numpy()
        tf_input_processor = processor(images=image_input , return_tensors='''tf''' )['''pixel_values'''].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract , pt_input_processor ) )
        self.assertTrue(np.allclose(pt_input_feat_extract , tf_input_feat_extract ) )
        self.assertTrue(np.allclose(tf_input_feat_extract , tf_input_processor ) )
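# A conceptual sketch (added; simplified relative to SAM's real post-processing):
# `post_process_masks` essentially upsamples low-resolution mask logits back to
# the original image size, which is why the (1, 3, 5, 5) dummy masks above come
# back as (1, 3, 1764, 2646).
#
#   import torch.nn.functional as F
#   low_res = torch.ones(1 , 3 , 5 , 5 )
#   restored = F.interpolate(low_res , size=(1764, 2646) , mode='''bilinear''' , align_corners=False )
#   assert restored.shape == (1, 3, 1764, 2646)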
| 706
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_VOCAB = get_tests_dir('fixtures/vocab.json')
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def __snake_case ( self ):
        processor = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
        self.assertIsInstance(processor , WavaVecaProcessor )
def __snake_case ( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            processor = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )

            # save in new folder
            model_config.save_pretrained(tmpdirname )
            processor.save_pretrained(tmpdirname )

            processor = AutoProcessor.from_pretrained(tmpdirname )

        self.assertIsInstance(processor , WavaVecaProcessor )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG , os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) )
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , '''vocab.json''' ) )

            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , WavaVecaProcessor )
def __snake_case ( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )

            processor = WavaVecaProcessor(feature_extractor , tokenizer )

            # save in new folder
            processor.save_pretrained(tmpdirname )

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , '''r''' ) as f:
                config_dict = json.load(f )
            config_dict.pop('''processor_class''' )

            with open(os.path.join(tmpdirname , TOKENIZER_CONFIG_FILE ) , '''w''' ) as f:
                f.write(json.dumps(config_dict ) )

            processor = AutoProcessor.from_pretrained(tmpdirname )

        self.assertIsInstance(processor , WavaVecaProcessor )
def __snake_case ( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )

            processor = WavaVecaProcessor(feature_extractor , tokenizer )

            # save in new folder
            processor.save_pretrained(tmpdirname )

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , '''r''' ) as f:
                config_dict = json.load(f )
            config_dict.pop('''processor_class''' )

            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , '''w''' ) as f:
                f.write(json.dumps(config_dict ) )

            processor = AutoProcessor.from_pretrained(tmpdirname )

        self.assertIsInstance(processor , WavaVecaProcessor )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
            model_config.save_pretrained(tmpdirname )
            # copy relevant files
            copyfile(SAMPLE_VOCAB , os.path.join(tmpdirname , '''vocab.json''' ) )
            # create empty sample processor
            with open(os.path.join(tmpdirname , FEATURE_EXTRACTOR_NAME ) , '''w''' ) as f:
                f.write('''{}''' )

            processor = AutoProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(processor , WavaVecaProcessor )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            processor = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )

        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            processor = AutoProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=False )

        processor = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=True )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
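# A distilled sketch (added) of the registration flow exercised above: a config
# class is keyed by its `model_type`, then processor-side classes are attached to
# that config so the Auto* factories can resolve them later.
#
#   AutoConfig.register('''custom''' , CustomConfig )                      # model_type -> config
#   AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
#   AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
#   AutoProcessor.register(CustomConfig , CustomProcessor )               # config -> processor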
| 55
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    F" {positive_arg}={kwargs[positive_arg]}" )

        self.tpu_name = kwargs.pop('''tpu_name''' , self.tpu_name )
        self.device_idx = kwargs.pop('''device_idx''' , self.device_idx )
        self.eager_mode = kwargs.pop('''eager_mode''' , self.eager_mode )
        self.use_xla = kwargs.pop('''use_xla''' , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"}, )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."} )
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        }, )
@cached_property
    def _setup_tpu( self ):
requires_backends(self , ['''tf'''] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy( self ):
requires_backends(self , ['''tf'''] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )

            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
                strategy = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}" )
            else:
                tf.config.set_visible_devices([] , '''GPU''' )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}" )

        return strategy
@property
    def is_tpu( self ):
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
    def strategy( self ):
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
    def gpu_list( self ):
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
    def n_gpu( self ):
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ):
return self.n_gpu > 0
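# A minimal sketch (added) of the `cached_property` pattern used above: the
# decorated method runs once per instance and its result is memoized, so the
# expensive TPU resolution and strategy construction happen at most one time.
#
#   class Expensive:
#       @cached_property
#       def value(self ):
#           print('''computed once''' )
#           return 42
#
#   e = Expensive()
#   e.value; e.value  # prints only once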
| 707
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand( ABC ):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        raise NotImplementedError()

    @abstractmethod
    def run( self ):
        raise NotImplementedError()
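# A minimal concrete sketch (added; the `hello` command is made up for
# illustration): `register_subcommand` wires the subparser and a factory, and
# `run` executes once the CLI dispatches to it.
class HelloCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        hello_parser = parser.add_parser('''hello''' )
        hello_parser.add_argument('''--name''' , default='''world''' )
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name ) )

    def __init__( self , name ):
        self.name = name

    def run( self ):
        print(F"hello, {self.name}" )

# e.g. wiring:
#   subparsers = ArgumentParser('''cli''' ).add_subparsers()
#   HelloCommand.register_subcommand(subparsers )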
| 55
| 0
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_SCREAMING_SNAKE_CASE : str = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    """Visualize the polynomial regression fit against the raw data."""
    plt.scatter(X , y , color='''red''' )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color='''blue''' )
plt.title('''Truth or Bluff (Linear Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 708
|
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
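# with the defaults above (image_size=[30, 30], patch_size=2, num_detection_tokens=10):
# num_patches = (30 // 2) * (30 // 2) = 225, so expected_seq_len = 225 + 1 + 10 = 236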
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self ):
A__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
A__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
A__ : Tuple = []
for i in range(self.batch_size ):
A__ : List[Any] = {}
A__ : Tuple = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
A__ : Any = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
A__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
A__ : str = []
for i in range(self.model_tester.batch_size ):
A__ : int = {}
A__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
A__ : Dict = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
A__ : Dict = labels
return inputs_dict
def __snake_case ( self ):
A__ : List[Any] = YolosModelTester(self )
A__ : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] = 3_84
if "tiny" in model_name:
A__ : Any = [3, 3, 9, 3]
A__ : Union[str, Any] = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
A__ : int = [3, 3, 27, 3]
A__ : Dict = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
A__ : Tuple = [3, 3, 27, 3]
A__ : List[Any] = [1_28, 2_56, 5_12, 10_24]
A__ : Union[str, Any] = 5_12
if "large" in model_name:
A__ : int = [3, 3, 27, 3]
A__ : Union[str, Any] = [1_92, 3_84, 7_68, 15_36]
A__ : Optional[Any] = 7_68
if "xlarge" in model_name:
A__ : Dict = [3, 3, 27, 3]
A__ : str = [2_56, 5_12, 10_24, 20_48]
A__ : List[str] = 10_24
# set label information
A__ : Any = 1_50
A__ : Optional[int] = '''huggingface/label-files'''
A__ : Union[str, Any] = '''ade20k-id2label.json'''
A__ : List[Any] = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
A__ : Any = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A__ : Any = {v: k for k, v in idalabel.items()}
A__ : int = ConvNextConfig(
depths=__UpperCamelCase , hidden_sizes=__UpperCamelCase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
A__ : int = UperNetConfig(
backbone_config=__UpperCamelCase , auxiliary_in_channels=__UpperCamelCase , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid=__UpperCamelCase , )
return config
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Dict:
"""simple docstring"""
A__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
A__ : Optional[Any] = dct.pop(__UpperCamelCase )
A__ : Dict = val
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
A__ : int = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
A__ : Dict = model_name_to_url[model_name]
A__ : str = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' )['''state_dict''']
A__ : Dict = get_upernet_config(__UpperCamelCase )
A__ : Union[str, Any] = UperNetForSemanticSegmentation(__UpperCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A__ : int = state_dict.pop(__UpperCamelCase )
if "bn" in key:
A__ : Union[str, Any] = key.replace('''bn''' , '''batch_norm''' )
A__ : Union[str, Any] = val
# rename keys
A__ : List[str] = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# verify on image
A__ : List[Any] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
A__ : int = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('''RGB''' )
A__ : Union[str, Any] = SegformerImageProcessor()
A__ : Union[str, Any] = processor(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
A__ : Dict = model(__UpperCamelCase )
if model_name == "upernet-convnext-tiny":
A__ : Union[str, Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
A__ : List[str] = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
A__ : Any = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
A__ : Union[str, Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
A__ : List[Any] = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCamelCase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 709
|
def fibonacci(n: int) -> int:
"""Return the n-th Fibonacci number (fibonacci(2) == 1, fibonacci(12) == 144)."""
if n == 1 or not isinstance(n, int):
return 0
elif n == 2:
return 1
else:
sequence = [0, 1]
for i in range(2, n + 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence[n]
def fibonacci_digits_index(n: int) -> int:
"""Return the index of the first Fibonacci number with n digits."""
digits = 0
index = 2
while digits < n:
index += 1
digits = len(str(fibonacci(index)))
return index
def solution(n: int = 1000) -> int:
"""Return the index of the first Fibonacci term containing n digits (Project Euler 25)."""
return fibonacci_digits_index(n)
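# Small sanity check: solution(2) == 7 and solution(3) == 12, since F(7) = 13 and
# F(12) = 144 are the first Fibonacci numbers with two and three digits respectively.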
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 55
| 0
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=2 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=None , UpperCamelCase__=2 , UpperCamelCase__=2 , ):
A__ : Dict = parent
A__ : Tuple = batch_size
A__ : int = patch_size
A__ : int = max_length
A__ : Tuple = num_mel_bins
A__ : List[Any] = is_training
A__ : List[Any] = use_labels
A__ : Any = hidden_size
A__ : Optional[int] = num_hidden_layers
A__ : List[Any] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : List[str] = hidden_act
A__ : Dict = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : Dict = type_sequence_label_size
A__ : List[Any] = initializer_range
A__ : List[str] = scope
A__ : List[Any] = frequency_stride
A__ : str = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
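# with the defaults above (num_mel_bins=16, max_length=24, patch_size=2, both strides=2):
# frequency_out_dimension = (16 - 2) // 2 + 1 = 8, time_out_dimension = (24 - 2) // 2 + 1 = 12,
# so num_patches = 8 * 12 = 96 and seq_length = 96 + 2 = 98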
A__ : str = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
A__ : int = (self.max_length - self.patch_size) // self.time_stride + 1
A__ : Any = frequency_out_dimension * time_out_dimension
A__ : Dict = num_patches + 2
def __snake_case ( self ):
A__ : str = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
A__ : Tuple = None
if self.use_labels:
A__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[Any] = self.get_config()
return config, input_values, labels
def __snake_case ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = ASTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self ):
A__ : Any = self.prepare_config_and_inputs()
A__ , A__ , A__ : str = config_and_inputs
A__ : Optional[int] = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def __snake_case ( self ):
A__ : Union[str, Any] = ASTModelTester(self )
A__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Tuple = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = model_class(UpperCamelCase__ )
A__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Dict = [*signature.parameters.keys()]
A__ : List[Any] = ['''input_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : int = ASTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
A__ : Tuple = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
A__ : List[Any] = torchaudio.load(__UpperCamelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def __snake_case ( self ):
A__ : Union[str, Any] = self.default_feature_extractor
A__ : Optional[Any] = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCamelCase__ )
A__ : str = self.default_feature_extractor
A__ : Optional[int] = prepare_audio()
A__ : Optional[Any] = audio.squeeze().numpy()
A__ : Any = feature_extractor(UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Optional[Any] = model(**UpperCamelCase__ )
# verify the logits
A__ : Optional[int] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : str = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 710
|
_SCREAMING_SNAKE_CASE : List[str] = range(2, 2_0 + 1)
_SCREAMING_SNAKE_CASE : Optional[Any] = [1_0**k for k in range(ks[-1] + 1)]
_SCREAMING_SNAKE_CASE : dict[int, dict[int, list[list[int]]]] = {}
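# Project Euler problem 551 (https://projecteuler.net/problem=551): starting from
# a(0) = 1, each term is a(n) = a(n - 1) + digit_sum(a(n - 1)); find a(10**15).
# `memo` caches previously computed (diff, terms_skipped, k) "jumps", keyed by the
# digit sum of the high digits and the value of the low digits, so long stretches
# of the sequence can be skipped rather than recomputed term by term.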
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
A__ : Tuple = sum(a_i[j] for j in range(__UpperCamelCase , len(__UpperCamelCase ) ) )
A__ : Tuple = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) , __UpperCamelCase ) ) )
A__ , A__ : Optional[int] = 0, 0
A__ : List[Any] = n - i
A__ : Any = memo.get(__UpperCamelCase )
if sub_memo is not None:
A__ : Optional[int] = sub_memo.get(__UpperCamelCase )
if jumps is not None and len(__UpperCamelCase ) > 0:
# find and make the largest jump without going over
A__ : List[Any] = -1
for _k in range(len(__UpperCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A__ : List[str] = _k
break
if max_jump >= 0:
A__ , A__ , A__ : List[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
A__ : int = diff + c
for j in range(min(__UpperCamelCase , len(__UpperCamelCase ) ) ):
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
if new_c > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
A__ : List[Any] = []
else:
A__ : Optional[Any] = {c: []}
A__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A__ , A__ : str = next_term(__UpperCamelCase , k - 1 , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A__ , A__ : str = compute(__UpperCamelCase , __UpperCamelCase , i + dn , __UpperCamelCase )
diff += _diff
dn += terms_jumped
A__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
A__ : List[Any] = 0
while j < len(__UpperCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(__UpperCamelCase , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ) -> Any:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(__UpperCamelCase ):
a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
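# e.g., for little-endian digits a_i = [4, 1, 7, 2] (the number 2714) and k = 2:
# c = 14 with ds_c = 4 + 1 = 5, and b = 27 with ds_b = 7 + 2 = 9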
A__ : Optional[Any] = i
A__ , A__ , A__ : Dict = 0, 0, 0
for j in range(len(__UpperCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A__ : int = ds_c + ds_b
diff += addend
A__ : List[Any] = 0
for j in range(__UpperCamelCase ):
A__ : Optional[Any] = a_i[j] + addend
A__ , A__ : List[str] = divmod(__UpperCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
A__ : Any = digits[j] + addend
if s >= 10:
A__ , A__ : Union[str, Any] = divmod(__UpperCamelCase , 10 )
A__ : Optional[int] = addend // 10 + quotient
else:
A__ : Any = s
A__ : Dict = addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict = divmod(__UpperCamelCase , 10 )
digits.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**15 ) -> int:
"""simple docstring"""
A__ : List[Any] = [1]
A__ : Dict = 1
A__ : Tuple = 0
while True:
A__ , A__ : List[str] = next_term(__UpperCamelCase , 20 , i + dn , __UpperCamelCase )
dn += terms_jumped
if dn == n - i:
break
A__ : List[str] = 0
for j in range(len(__UpperCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 55
| 0
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCamelCase__ :
'''simple docstring'''
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
A__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
A__ : Tuple = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Any = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
A__ : Optional[int] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __snake_case ( self ):
torch.manual_seed(0 )
A__ : Dict = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
A__ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
A__ : str = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : int = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
A__ : List[Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
A__ : str = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __snake_case ( self ):
A__ : int = self.get_dummy_components()
A__ : Tuple = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : List[str] = self.get_dummy_inputs(UpperCamelCase__ )
A__ : Optional[int] = inputs['''prompt''']
A__ : Optional[int] = inputs['''generator''']
A__ : int = inputs['''num_inference_steps''']
A__ : Optional[Any] = inputs['''output_type''']
if "image" in inputs:
A__ : Optional[Any] = inputs['''image''']
else:
A__ : Dict = None
if "mask_image" in inputs:
A__ : List[str] = inputs['''mask_image''']
else:
A__ : List[str] = None
if "original_image" in inputs:
A__ : Optional[int] = inputs['''original_image''']
else:
A__ : Tuple = None
A__ : int = pipe.encode_prompt(UpperCamelCase__ )
# inputs with prompt converted to embeddings
A__ : List[str] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
A__ : Dict = image
if mask_image is not None:
A__ : str = mask_image
if original_image is not None:
A__ : List[str] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ : int = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F"`{optional_component}` did not stay set to None after loading." , )
A__ : int = self.get_dummy_inputs(UpperCamelCase__ )
A__ : Optional[int] = inputs['''generator''']
A__ : str = inputs['''num_inference_steps''']
A__ : Optional[int] = inputs['''output_type''']
# inputs with prompt converted to embeddings
A__ : List[str] = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
A__ : List[Any] = image
if mask_image is not None:
A__ : Union[str, Any] = mask_image
if original_image is not None:
A__ : List[str] = original_image
A__ : Any = pipe_loaded(**UpperCamelCase__ )[0]
A__ : int = np.abs(to_np(UpperCamelCase__ ) - to_np(UpperCamelCase__ ) ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def __snake_case ( self ):
A__ : Optional[Any] = self.get_dummy_components()
A__ : List[Any] = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ : int = self.get_dummy_inputs(UpperCamelCase__ )
A__ : str = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
A__ : Optional[int] = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
A__ : List[Any] = self.get_dummy_inputs(UpperCamelCase__ )
A__ : str = pipe_loaded(**UpperCamelCase__ )[0]
A__ : Optional[int] = np.abs(to_np(UpperCamelCase__ ) - to_np(UpperCamelCase__ ) ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
| 711
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=False ) -> Tuple:
"""simple docstring"""
try:
A__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
A__ : Union[str, Any] = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
_SCREAMING_SNAKE_CASE : Union[str, Any] = parse_flag_from_env('RUN_SLOW', default=False)
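# e.g., running the suite with `RUN_SLOW=yes` enables the tests guarded by the slow-test decorator defined below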
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
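# note: returning `partial(...)` when no test case is given lets the decorator above be
# applied both bare and with an explicit argument, e.g. `@decorator` or `@decorator(version="1.12.0")`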
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
@classmethod
def __snake_case ( cls ):
A__ : Tuple = tempfile.mkdtemp()
@classmethod
def __snake_case ( cls ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __snake_case ( self ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self , UpperCamelCase__ ):
A__ : Tuple = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> Any:
"""simple docstring"""
A__ : int = AcceleratorState()
A__ : Any = tensor[None].clone().to(state.device )
A__ : Optional[int] = gather(__UpperCamelCase ).cpu()
A__ : Any = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __UpperCamelCase ):
return False
return True
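# i.e., the helper above returns True only when every process holds an identical copy of the gathered tensor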
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = returncode
A__ : Union[str, Any] = stdout
A__ : Dict = stderr
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
while True:
A__ : Tuple = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=False , __UpperCamelCase : List[Any]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__UpperCamelCase ) )
A__ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ : List[Any] = []
A__ : str = []
def tee(__UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" ):
A__ : Optional[Any] = line.decode('''utf-8''' ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase , __UpperCamelCase , file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__UpperCamelCase , __UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__UpperCamelCase , )
return _RunOutput(await p.wait() , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=1_80 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Dict=True ) -> _RunOutput:
"""simple docstring"""
A__ : Dict = asyncio.get_event_loop()
A__ : Optional[Any] = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase , env=__UpperCamelCase , stdin=__UpperCamelCase , timeout=__UpperCamelCase , quiet=__UpperCamelCase , echo=__UpperCamelCase ) )
A__ : Union[str, Any] = ''' '''.join(__UpperCamelCase )
if result.returncode > 0:
A__ : Optional[Any] = '''\n'''.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=False ) -> Dict:
"""simple docstring"""
try:
A__ : List[Any] = subprocess.check_output(__UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase , '''decode''' ):
A__ : Any = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 55
| 0
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "laptop" ) -> DataFrame:
"""simple docstring"""
A__ : List[Any] = F"https://www.amazon.in/laptop/s?k={product}"
A__ : Dict = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
A__ : Any = BeautifulSoup(requests.get(__UpperCamelCase , headers=__UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
A__ : Optional[Any] = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
A__ : Optional[int] = item.ha.text
A__ : Optional[Any] = '''https://www.amazon.in/''' + item.ha.a['''href''']
A__ : List[Any] = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
A__ : Optional[int] = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
A__ : str = '''Not available'''
try:
A__ : Optional[int] = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
A__ : Any = ''''''
try:
A__ : Union[str, Any] = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_00 )
except ValueError:
A__ : Any = float('''nan''' )
except AttributeError:
pass
A__ : Dict = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A__ : Tuple = ''' '''
A__ : Union[str, Any] = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = 'headphones'
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""")
| 712
|
import numpy as np
_SCREAMING_SNAKE_CASE : Any = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
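# The 5x5 Polybius square above (with 'j' merged into 'i') drives the Bifid cipher
# implemented below: each letter is mapped to 1-based (row, column) coordinates, the
# coordinate rows are flattened and regrouped into new pairs, and the pairs are read
# back out of the square as letters.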
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ):
A__ : List[Any] = np.array(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ , A__ : Any = np.where(letter == self.SQUARE )
A__ : int = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : Union[str, Any] = message.replace('''j''' , '''i''' )
A__ : List[Any] = np.empty((2, len(UpperCamelCase__ )) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : Any = self.letter_to_numbers(message[letter_index] )
A__ : Optional[Any] = numbers[0]
A__ : List[str] = numbers[1]
A__ : List[str] = first_step.reshape(2 * len(UpperCamelCase__ ) )
A__ : List[Any] = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Dict = int(second_step[numbers_index * 2] )
A__ : List[str] = int(second_step[(numbers_index * 2) + 1] )
A__ : Dict = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = encoded_message + letter
return encoded_message
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = message.lower()
A__ : str = message.replace(''' ''' , '''''' )
A__ : List[Any] = np.empty(2 * len(UpperCamelCase__ ) )
for letter_index in range(len(UpperCamelCase__ ) ):
A__ : List[str] = self.letter_to_numbers(message[letter_index] )
A__ : Dict = numbers[0]
A__ : int = numbers[1]
A__ : Optional[Any] = first_step.reshape((2, len(UpperCamelCase__ )) )
A__ : int = ''''''
for numbers_index in range(len(UpperCamelCase__ ) ):
A__ : Tuple = int(second_step[0, numbers_index] )
A__ : Dict = int(second_step[1, numbers_index] )
A__ : List[str] = self.numbers_to_letter(UpperCamelCase__ , UpperCamelCase__ )
A__ : Tuple = decoded_message + letter
return decoded_message
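# A self-contained sketch of the Bifid-style transposition implemented above
# (hypothetical variable names, not the class's own API; numpy is already
# imported above as np): the row/column coordinates of each letter are written
# as two rows, flattened, and re-read as consecutive pairs of cipher coordinates.
_coords = np.array([[1, 2, 3], [4, 5, 6]])  # row 0: row-indexes, row 1: column-indexes
_flat = _coords.reshape(2 * 3)  # -> [1, 2, 3, 4, 5, 6]
_pairs = _flat.reshape(3, 2)  # -> [[1, 2], [3, 4], [5, 6]], one pair per cipher letter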
| 55
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=2 , ):
A__ : Any = parent
A__ : str = batch_size
A__ : Optional[int] = image_size
A__ : Tuple = patch_size
A__ : Dict = num_channels
A__ : Optional[Any] = is_training
A__ : List[str] = use_labels
A__ : int = hidden_size
A__ : Optional[Any] = num_hidden_layers
A__ : Union[str, Any] = num_attention_heads
A__ : str = intermediate_size
A__ : Any = hidden_act
A__ : List[str] = hidden_dropout_prob
A__ : int = attention_probs_dropout_prob
A__ : List[str] = type_sequence_label_size
A__ : Dict = initializer_range
A__ : Any = scope
A__ : Dict = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A__ : Dict = (image_size // patch_size) ** 2
A__ : Dict = num_patches + 2
def __snake_case ( self ):
A__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : Optional[Any] = None
if self.use_labels:
A__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[str] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Dict = TFDeiTModel(config=UpperCamelCase__ )
A__ : int = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = TFDeiTForMaskedImageModeling(config=UpperCamelCase__ )
A__ : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A__ : Optional[Any] = 1
A__ : Any = TFDeiTForMaskedImageModeling(UpperCamelCase__ )
A__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ : List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : List[Any] = self.type_sequence_label_size
A__ : str = TFDeiTForImageClassification(UpperCamelCase__ )
A__ : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ : Optional[Any] = 1
A__ : Optional[int] = TFDeiTForImageClassification(UpperCamelCase__ )
A__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ : Optional[int] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self ):
A__ : Dict = self.prepare_config_and_inputs()
A__ : Tuple = config_and_inputs
A__ : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self ):
A__ : int = TFDeiTModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[Any] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Dense ) )
def __snake_case ( self ):
A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[Any] = model_class(UpperCamelCase__ )
A__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : List[str] = [*signature.parameters.keys()]
A__ : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def __snake_case ( self ):
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Any = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __snake_case ( self ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[str] = TFDeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
A__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __snake_case ( self ):
A__ : Any = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
A__ : Optional[int] = self.default_image_processor
A__ : Dict = prepare_img()
A__ : Any = image_processor(images=UpperCamelCase__ , return_tensors='''tf''' )
# forward pass
A__ : str = model(**UpperCamelCase__ )
# verify the logits
A__ : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : List[str] = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 713
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 55
| 0
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_55.0 <= level <= 2_55.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
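# Note: 1_28 + level + (c - 1_28) simplifies to c + level, so the point map
# simply shifts every channel value by `level`; PIL clips the looked-up values
# to the valid 0..255 range for 8-bit images.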
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 714
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE : str = 1_6
_SCREAMING_SNAKE_CASE : Tuple = 3_2
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Accelerator , __UpperCamelCase : int = 16 ) -> Optional[int]:
"""simple docstring"""
A__ : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A__ : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__UpperCamelCase : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
A__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ : Optional[int] = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__UpperCamelCase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ : int = 16
elif accelerator.mixed_precision != "no":
A__ : Any = 8
else:
A__ : Union[str, Any] = None
return tokenizer.pad(
__UpperCamelCase , padding='''longest''' , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A__ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
A__ : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __UpperCamelCase ) == "1":
A__ : List[str] = 2
# Initialize accelerator
A__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config['''lr''']
A__ : Dict = int(config['''num_epochs'''] )
A__ : int = int(config['''seed'''] )
A__ : Optional[Any] = int(config['''batch_size'''] )
A__ : int = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
A__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
A__ : Dict = MAX_GPU_BATCH_SIZE
set_seed(__UpperCamelCase )
A__ , A__ : int = get_dataloaders(__UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__UpperCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
A__ : Optional[int] = AdamW(params=model.parameters() , lr=__UpperCamelCase )
# Instantiate scheduler
A__ : Any = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ : Dict = model(**__UpperCamelCase )
A__ : Dict = outputs.loss
A__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
A__ : Optional[int] = 0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ : Union[str, Any] = model(**__UpperCamelCase )
A__ : int = outputs.logits.argmax(dim=-1 )
A__ , A__ : Optional[Any] = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(__UpperCamelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
A__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A__ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
A__ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A__ : Dict = parser.parse_args()
A__ : Any = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
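# A sketch of the simpler alternative mentioned in the evaluation loop above:
# `Accelerator.gather_for_metrics` drops the duplicate samples that distributed
# samplers pad onto the last batch, making the manual `samples_seen` truncation
# unnecessary (illustrative only; the names follow the loop above):
# predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
# metric.add_batch(predictions=predictions, references=references)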
| 55
| 0
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : NDArray[floataa] , __UpperCamelCase : NDArray[floataa] , __UpperCamelCase : list[int] , __UpperCamelCase : int , ) -> list[float]:
"""simple docstring"""
A__ : Tuple = coefficient_matrix.shape
A__ : List[str] = constant_matrix.shape
if rowsa != colsa:
A__ : Optional[Any] = F"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(__UpperCamelCase )
if colsa != 1:
A__ : Any = F"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(__UpperCamelCase )
if rowsa != rowsa:
A__ : Union[str, Any] = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
F"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(__UpperCamelCase )
if len(__UpperCamelCase ) != rowsa:
A__ : Dict = (
'''Number of initial values must be equal to number of rows in coefficient '''
F"matrix but received {len(__UpperCamelCase )} and {rowsa}"
)
raise ValueError(__UpperCamelCase )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
A__ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
A__ : Union[str, Any] = table.shape
strictly_diagonally_dominant(__UpperCamelCase )
# Iterates the whole matrix for given number of times
for _ in range(__UpperCamelCase ):
A__ : str = []
for row in range(__UpperCamelCase ):
A__ : Any = 0
for col in range(__UpperCamelCase ):
if col == row:
A__ : List[str] = table[row][col]
elif col == cols - 1:
A__ : Union[str, Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A__ : List[Any] = (temp + val) / denom
new_val.append(__UpperCamelCase )
A__ : Any = new_val
return [float(__UpperCamelCase ) for i in new_val]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : NDArray[floataa] ) -> bool:
"""simple docstring"""
A__ : Union[str, Any] = table.shape
A__ : Union[str, Any] = True
for i in range(0 , __UpperCamelCase ):
A__ : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
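# A minimal usage sketch for the Jacobi iteration above (made-up 2x2 system;
# `jacobi_iteration_method` is a hypothetical name for the first function in
# this module, and the coefficient matrix is strictly diagonally dominant,
# as required):
# coefficient = np.array([[4.0, 1.0], [2.0, 5.0]])
# constant = np.array([[1.0], [7.0]])
# jacobi_iteration_method(coefficient, constant, init_val=[0, 0], iterations=25)
# converges toward the exact solution x = [-1/9, 13/9] ~ [-0.111, 1.444].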
| 715
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "microsoft/speecht5_tts"
_lowerCAmelCase = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
_lowerCAmelCase = "text_reader"
_lowerCAmelCase = SpeechTaProcessor
_lowerCAmelCase = SpeechTaForTextToSpeech
_lowerCAmelCase = SpeechTaHifiGan
_lowerCAmelCase = ["text"]
_lowerCAmelCase = ["audio"]
def __snake_case ( self ):
if self.post_processor is None:
A__ : int = '''microsoft/speecht5_hifigan'''
super().setup()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__=None ):
A__ : List[Any] = self.pre_processor(text=UpperCamelCase__ , return_tensors='''pt''' , truncation=UpperCamelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
A__ : List[Any] = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
A__ : Dict = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.model.generate_speech(**UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
with torch.no_grad():
return self.post_processor(UpperCamelCase__ ).cpu().detach()
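# A hypothetical usage sketch for the text-to-speech tool above (the
# `load_tool` entry point and lookup by the tool's `name` field are
# assumptions about the surrounding agents API):
# from transformers import load_tool
# tool = load_tool("text-to-speech")
# audio = tool("This is a test")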
| 55
| 0
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE : List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_SCREAMING_SNAKE_CASE : int = get_tests_dir('fixtures/vocab.json')
_SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures')
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def __snake_case ( self ):
A__ : List[Any] = 0
def __snake_case ( self ):
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[Any] = WavaVecaConfig()
A__ : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ : Any = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Dict = WavaVecaFeatureExtractor()
A__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : Optional[int] = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : str = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : Optional[int] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Optional[int] = WavaVecaFeatureExtractor()
A__ : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A__ : str = WavaVecaProcessor(UpperCamelCase__ , UpperCamelCase__ )
# save in new folder
processor.save_pretrained(UpperCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''r''' ) as f:
A__ : List[Any] = json.load(UpperCamelCase__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write(json.dumps(UpperCamelCase__ ) )
A__ : List[Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(UpperCamelCase__ )
# copy relevant files
copyfile(UpperCamelCase__ , os.path.join(UpperCamelCase__ , '''vocab.json''' ) )
# create empty sample processor
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , '''w''' ) as f:
f.write('''{}''' )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A__ : str = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
A__ : int = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A__ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A__ : List[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A__ : Dict = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
A__ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __snake_case ( self ):
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
A__ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : str = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : str = CustomTokenizer(UpperCamelCase__ )
A__ : Optional[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(UpperCamelCase__ )
A__ : Union[str, Any] = AutoProcessor.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = False
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "AutoFeatureExtractor"
_lowerCAmelCase = "AutoTokenizer"
_lowerCAmelCase = False
try:
AutoConfig.register('''custom''' , UpperCamelCase__ )
AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoProcessor.register(UpperCamelCase__ , UpperCamelCase__ )
# If remote code is not set, the default is to use local classes.
A__ : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A__ : Any = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A__ : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCamelCase__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __snake_case ( self ):
A__ : str = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __snake_case ( self ):
A__ : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __snake_case ( cls ):
A__ : List[str] = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __snake_case ( self ):
A__ : Optional[Any] = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A__ : List[Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
A__ : int = WavaVecaProcessor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(UpperCamelCase__ , '''test-processor-org''' ) , push_to_hub=UpperCamelCase__ , use_auth_token=self._token , organization='''valid_org''' , )
A__ : List[str] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(UpperCamelCase__ , getattr(new_processor.feature_extractor , UpperCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __snake_case ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : List[Any] = os.path.join(UpperCamelCase__ , '''vocab.txt''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A__ : Union[str, Any] = CustomTokenizer(UpperCamelCase__ )
A__ : List[Any] = CustomProcessor(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
A__ : Union[str, Any] = Repository(UpperCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(UpperCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) ) as f:
A__ : Optional[int] = json.load(UpperCamelCase__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A__ : Tuple = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 716
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_SCREAMING_SNAKE_CASE : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_SCREAMING_SNAKE_CASE : Dict = {
'gpt-neox-20b': 2_0_4_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__="<|endoftext|>" , UpperCamelCase__=False , **UpperCamelCase__ , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
A__ : Union[str, Any] = getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
A__ : List[Any] = add_prefix_space
A__ : Any = pre_tok_class(**UpperCamelCase__ )
A__ : List[Any] = add_prefix_space
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
A__ : Any = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
A__ : List[str] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ : Tuple = input_ids[-self.model_max_length :]
return input_ids
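# A minimal usage sketch (the checkpoint id is taken from the pretrained map
# above; loading it downloads the tokenizer files from the Hub):
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
# tok("Hello world")["input_ids"]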
| 55
| 0
|
_SCREAMING_SNAKE_CASE : Union[str, Any] = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__UpperCamelCase ) )
def SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
return sum(
number
for number in range(10_00 , 1_00_00_00 )
if number == digits_fifth_powers_sum(__UpperCamelCase ) )
if __name__ == "__main__":
print(solution())
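# Worked example for the predicate above: 4150 equals the sum of the fifth
# powers of its own digits, so it is one of the numbers counted by solution().
assert 4**5 + 1**5 + 5**5 + 0**5 == 4150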
| 717
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : int = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "deformable_detr"
_lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=3 , UpperCamelCase__=300 , UpperCamelCase__=1024 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=6 , UpperCamelCase__=1024 , UpperCamelCase__=8 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=256 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1.0 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="sine" , UpperCamelCase__="resnet50" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=False , UpperCamelCase__=300 , UpperCamelCase__=False , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=1 , UpperCamelCase__=5 , UpperCamelCase__=2 , UpperCamelCase__=0.1 , UpperCamelCase__=0.2_5 , UpperCamelCase__=False , **UpperCamelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Union[str, Any] = backbone_config.get('''model_type''' )
A__ : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
A__ : Optional[int] = config_class.from_dict(UpperCamelCase__ )
A__ : Tuple = use_timm_backbone
A__ : int = backbone_config
A__ : List[Any] = num_channels
A__ : List[Any] = num_queries
A__ : str = max_position_embeddings
A__ : Tuple = d_model
A__ : int = encoder_ffn_dim
A__ : Union[str, Any] = encoder_layers
A__ : Optional[Any] = encoder_attention_heads
A__ : List[Any] = decoder_ffn_dim
A__ : Tuple = decoder_layers
A__ : Optional[Any] = decoder_attention_heads
A__ : List[str] = dropout
A__ : str = attention_dropout
A__ : List[Any] = activation_dropout
A__ : Any = activation_function
A__ : Optional[Any] = init_std
A__ : Union[str, Any] = init_xavier_std
A__ : Union[str, Any] = encoder_layerdrop
A__ : Optional[int] = auxiliary_loss
A__ : str = position_embedding_type
A__ : List[Any] = backbone
A__ : Optional[Any] = use_pretrained_backbone
A__ : Any = dilation
# deformable attributes
A__ : List[Any] = num_feature_levels
A__ : List[str] = encoder_n_points
A__ : int = decoder_n_points
A__ : List[Any] = two_stage
A__ : Dict = two_stage_num_proposals
A__ : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A__ : List[str] = class_cost
A__ : List[Any] = bbox_cost
A__ : Any = giou_cost
# Loss coefficients
A__ : List[str] = mask_loss_coefficient
A__ : Union[str, Any] = dice_loss_coefficient
A__ : List[Any] = bbox_loss_coefficient
A__ : Tuple = giou_loss_coefficient
A__ : Optional[Any] = eos_coefficient
A__ : List[Any] = focal_alpha
A__ : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.encoder_attention_heads
@property
def __snake_case ( self ):
return self.d_model
def __snake_case ( self ):
A__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ : Tuple = self.backbone_config.to_dict()
A__ : Optional[int] = self.__class__.model_type
return output
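# A minimal usage sketch (`DeformableDetrConfig` is the upstream name of the
# class defined above; note the guard requiring `with_box_refine=True`
# whenever `two_stage=True`):
# config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
# config.num_attention_heads == config.encoder_attention_heads  # via attribute_map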
| 55
| 0
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str = "isbn/0140328726" ) -> dict:
"""simple docstring"""
A__ : List[str] = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
A__ : str = F"{olid} is not a valid Open Library olid"
raise ValueError(__UpperCamelCase )
return requests.get(F"https://openlibrary.org/{new_olid}.json" ).json()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : dict ) -> dict:
"""simple docstring"""
A__ : int = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
A__ : Union[str, Any] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A__ : int = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
A__ : int = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A__ : Union[str, Any] = ''', '''.join(__UpperCamelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_SCREAMING_SNAKE_CASE : Optional[int] = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(f"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_SCREAMING_SNAKE_CASE : Dict = summarize_book(get_openlibrary_data(f"""isbn/{isbn}"""))
print('\n'.join(f"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"""Sorry, there are no results for ISBN: {isbn}.""")
| 718
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> List[Any]:
"""simple docstring"""
A__ : Optional[Any] = 0
A__ : Optional[Any] = len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
if len(__UpperCamelCase ) <= 1:
return arr, 0
A__ : Optional[int] = len(__UpperCamelCase ) // 2
A__ : List[str] = arr[0:mid]
A__ : Union[str, Any] = arr[mid:]
A__ , A__ : List[Any] = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
A__ , A__ : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
A__ : Any = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
A__ : str = []
A__ : Tuple = 0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if p[i] > q[j], then p[k] > q[j] for every i <= k < len(p).
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
A__ : int = count_inversions_bf(__UpperCamelCase )
A__ , A__ : int = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
A__ : Optional[Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Dict = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
# an empty list should also have zero inversions
A__ : Union[str, Any] = []
A__ : Union[str, Any] = count_inversions_bf(__UpperCamelCase )
A__ , A__ : Any = count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __UpperCamelCase )
if __name__ == "__main__":
main()
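# Complexity sketch: the brute-force counter above is O(n^2), while the
# merge-based recursive counter runs in O(n log n) because each merge counts
# all cross inversions in linear time. A tiny check (names follow the call
# sites above): count_inversions_bf([3, 1, 2]) == 2, the inversions being
# (3, 1) and (3, 2).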
| 55
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = '▁'
_SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'spiece.model'}
_SCREAMING_SNAKE_CASE : int = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
_SCREAMING_SNAKE_CASE : List[str] = {
'google/reformer-crime-and-punishment': 5_2_4_2_8_8,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__ , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__=[] , UpperCamelCase__ = None , **UpperCamelCase__ , ):
A__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A__ : Tuple = vocab_file
A__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def __snake_case ( self ):
return self.sp_model.get_piece_size()
def __snake_case ( self ):
A__ : Dict = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
A__ : Tuple = self.__dict__.copy()
A__ : List[str] = None
return state
def __setstate__( self , UpperCamelCase__ ):
A__ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
A__ : int = {}
A__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case ( self , UpperCamelCase__ ):
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
return self.sp_model.piece_to_id(UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ ):
if index < self.sp_model.get_piece_size():
A__ : Dict = self.sp_model.IdToPiece(UpperCamelCase__ )
return token
def __snake_case ( self , UpperCamelCase__ ):
A__ : int = []
A__ : str = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
A__ : Any = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
A__ : str = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
A__ : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
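# A minimal usage sketch (the checkpoint id is taken from the pretrained map
# above):
# from transformers import ReformerTokenizer
# tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# tok.tokenize("Hello world")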
| 719
|
from PIL import Image
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Image , __UpperCamelCase : float ) -> Image:
"""simple docstring"""
def brightness(__UpperCamelCase : int ) -> float:
return 1_28 + level + (c - 1_28)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__UpperCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_SCREAMING_SNAKE_CASE : Dict = change_brightness(img, 1_0_0)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 55
| 0
|
from math import factorial
_SCREAMING_SNAKE_CASE : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)}
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> int:
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('''Parameter number must be int''' )
if number < 0:
raise ValueError('''Parameter number must be greater than or equal to 0''' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(__UpperCamelCase ) )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 60 , __UpperCamelCase : int = 1_00_00_00 ) -> int:
"""simple docstring"""
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
A__ : List[Any] = 0
# the cached sizes of the previous chains
A__ : dict[int, int] = {}
for start_chain_element in range(1 , __UpperCamelCase ):
# The temporary set will contain the elements of the chain
A__ : List[Any] = set()
A__ : int = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater than the desired one.
A__ : Optional[int] = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(__UpperCamelCase )
chain_set_length += 1
A__ : List[str] = digit_factorial_sum(__UpperCamelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
A__ : Dict = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
| 720
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
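A hedged sketch of how this mixin is consumed (names assumed: upstream calls the mixin FeatureExtractionSavingTestMixin, and the extractor kwargs are illustrative): a concrete test case supplies the extractor class plus its constructor kwargs and inherits the round-trip tests above.
import unittest

from transformers import Wav2Vec2FeatureExtractor


class Wav2Vec2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    feat_extract_dict = {"feature_size": 1, "sampling_rate": 16_000, "padding_value": 0.0}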
| 55
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def __snake_case ( UpperCamelCase__ ):
raise NotImplementedError()
@abstractmethod
def __snake_case ( self ):
raise NotImplementedError()
| 721
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE : Union[str, Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_SCREAMING_SNAKE_CASE : Tuple = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_SCREAMING_SNAKE_CASE : Optional[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , ):
A__ : List[Any] = len(references[0] )
if any(len(UpperCamelCase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
A__ : Dict = [[refs[i] for refs in references] for i in range(UpperCamelCase__ )]
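# Illustrative values (not from the source): references grouped per prediction,
# e.g. [["r1a", "r1b"], ["r2a", "r2b"]], are transposed here into per-stream
# lists [["r1a", "r2a"], ["r1b", "r2b"]], the layout sacrebleu's corpus_score expects.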
A__ : Optional[Any] = TER(
normalized=UpperCamelCase__ , no_punct=UpperCamelCase__ , asian_support=UpperCamelCase__ , case_sensitive=UpperCamelCase__ , )
A__ : str = sb_ter.corpus_score(UpperCamelCase__ , UpperCamelCase__ )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 55
| 0
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_SCREAMING_SNAKE_CASE : List[Any] = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_SCREAMING_SNAKE_CASE : List[str] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_SCREAMING_SNAKE_CASE : str = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    fa = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def pearson_and_spearman(preds, labels):
    """simple docstring"""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
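A hedged numeric illustration (values chosen here, not from the source): a strictly increasing but nonlinear relation yields a perfect Spearman correlation while Pearson stays below 1.
print(pearson_and_spearman([1.0, 2.0, 3.0, 4.0], [1.0, 4.0, 9.0, 16.0]))
# {'pearson': 0.984..., 'spearmanr': 1.0}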
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase__ , UpperCamelCase__ )}
elif self.config_name == "stsb":
return pearson_and_spearman(UpperCamelCase__ , UpperCamelCase__ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 700
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
# TODO Update this
_SCREAMING_SNAKE_CASE : Optional[int] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCAmelCase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.0_2 , UpperCamelCase__=1e-12 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ):
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[Any] = vocab_size
A__ : int = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : str = intermediate_size
A__ : List[str] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : int = max_position_embeddings
A__ : List[str] = initializer_range
A__ : List[Any] = layer_norm_eps
A__ : int = position_embedding_type
A__ : Optional[Any] = use_cache
A__ : Optional[int] = emb_layer_norm_before
A__ : List[str] = token_dropout
A__ : Tuple = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
A__ : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[int] = EsmFoldConfig(**UpperCamelCase__ )
A__ : int = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
A__ : Any = get_default_vocab_list()
else:
A__ : Dict = vocab_list
else:
A__ : Optional[Any] = None
A__ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __snake_case ( self ):
A__ : Optional[int] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 0
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.trunk is None:
A__ : Tuple = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A__ : List[Any] = TrunkConfig(**self.trunk )
def __snake_case ( self ):
A__ : Optional[int] = asdict(self )
A__ : int = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 48
_lowerCAmelCase = 1_024
_lowerCAmelCase = 128
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 32
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = False
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = None
def __snake_case ( self ):
if self.structure_module is None:
A__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A__ : str = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
F" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A__ : Tuple = self.sequence_state_dim // self.sequence_head_width
A__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __snake_case ( self ):
A__ : List[Any] = asdict(self )
A__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = 384
_lowerCAmelCase = 128
_lowerCAmelCase = 16
_lowerCAmelCase = 128
_lowerCAmelCase = 12
_lowerCAmelCase = 4
_lowerCAmelCase = 8
_lowerCAmelCase = 0.1
_lowerCAmelCase = 8
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 7
_lowerCAmelCase = 10
_lowerCAmelCase = 1e-8
_lowerCAmelCase = 1e5
def __snake_case ( self ):
return asdict(self )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
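A minimal usage sketch (hedged: upstream exposes this config class as EsmConfig, and the sizes below are illustrative): constructing a folding-model config exercises the defaulting logic above.
config = EsmConfig(vocab_size=33, is_folding_model=True)
assert config.esmfold_config is not None          # defaulted to EsmFoldConfig()
assert config.vocab_list == get_default_vocab_list()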
| 55
| 0
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCamelCase__ :
def __init__( self , UpperCamelCase__ = None ):
if components is None:
A__ : Optional[int] = []
A__ : Dict = list(UpperCamelCase__ )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self , UpperCamelCase__ ):
A__ : Optional[int] = len(self )
if size == len(UpperCamelCase__ ):
A__ : Optional[int] = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('''must have the same size''' )
def __sub__( self , UpperCamelCase__ ):
A__ : Optional[Any] = len(self )
if size == len(UpperCamelCase__ ):
A__ : List[Any] = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('''must have the same size''' )
@overload
def __mul__( self , UpperCamelCase__ ):
...
@overload
def __mul__( self , UpperCamelCase__ ):
...
def __mul__( self , UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , (float, int) ):
A__ : Optional[Any] = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A__ : Optional[int] = len(self )
A__ : List[Any] = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('''invalid operand!''' )
def __snake_case ( self ):
return Vector(self.__components )
def __snake_case ( self , UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('''index out of range''' )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
assert -len(self.__components ) <= pos < len(self.__components )
A__ : Tuple = value
def __snake_case ( self ):
if len(self.__components ) == 0:
raise Exception('''Vector is empty''' )
A__ : Tuple = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = False ):
A__ : Dict = self * other
A__ : Any = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Vector:
"""simple docstring"""
assert isinstance(__UpperCamelCase , __UpperCamelCase )
return Vector([0] * dimension )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> Vector:
"""simple docstring"""
assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (isinstance(__UpperCamelCase , __UpperCamelCase ))
A__ : Union[str, Any] = [0] * dimension
A__ : str = 1
return Vector(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : float , __UpperCamelCase : Vector , __UpperCamelCase : Vector ) -> Vector:
"""simple docstring"""
assert (
isinstance(__UpperCamelCase , __UpperCamelCase )
and isinstance(__UpperCamelCase , __UpperCamelCase )
and (isinstance(__UpperCamelCase , (int, float) ))
)
return x * scalar + y
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) -> Vector:
"""simple docstring"""
random.seed(__UpperCamelCase )
A__ : Optional[Any] = [random.randint(__UpperCamelCase , __UpperCamelCase ) for _ in range(__UpperCamelCase )]
return Vector(__UpperCamelCase )
class UpperCamelCase__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Optional[Any] = matrix
A__ : Optional[int] = w
A__ : Any = h
def __str__( self ):
A__ : Optional[int] = ''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , UpperCamelCase__ ):
if self.__width == other.width() and self.__height == other.height():
A__ : Dict = []
for i in range(self.__height ):
A__ : Any = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('''matrix must have the same dimension!''' )
def __sub__( self , UpperCamelCase__ ):
if self.__width == other.width() and self.__height == other.height():
A__ : List[str] = []
for i in range(self.__height ):
A__ : int = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self , UpperCamelCase__ ):
...
@overload
def __mul__( self , UpperCamelCase__ ):
...
def __mul__( self , UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A__ : Optional[Any] = zero_vector(self.__height )
for i in range(self.__height ):
A__ : Optional[Any] = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'''vector must have the same size as the '''
'''number of columns of the matrix!''' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A__ : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def __snake_case ( self ):
return self.__height
def __snake_case ( self ):
return self.__width
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('''change_component: indices out of bounds''' )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A__ : Any = value
else:
raise Exception('''change_component: indices out of bounds''' )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
A__ : int = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A__ : Any = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ ):
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('''Indices out of bounds''' )
def __snake_case ( self ):
if self.__height != self.__width:
raise Exception('''Matrix is not square''' )
if self.__height < 1:
raise Exception('''Matrix has no element''' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A__ : Dict = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Matrix:
"""simple docstring"""
A__ : list[list[float]] = [[0] * n for _ in range(__UpperCamelCase )]
return Matrix(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) -> Matrix:
"""simple docstring"""
random.seed(__UpperCamelCase )
A__ : list[list[float]] = [
[random.randint(__UpperCamelCase , __UpperCamelCase ) for _ in range(__UpperCamelCase )] for _ in range(__UpperCamelCase )
]
return Matrix(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
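A short usage sketch of the intended API (assuming the de-obfuscated upstream implementations behind the skeletons above; values illustrative):
v = Vector([1.0, 2.0, 3.0])
w = Vector([3.0, 2.0, 1.0])
print(v + w)            # (4.0,4.0,4.0)
print(v * w)            # dot product: 10.0
m = Matrix([[1, 2], [3, 4]], 2, 2)
print(m.determinant())  # 1 * 4 - 2 * 3 = -2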
| 701
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1024 , UpperCamelCase__=1024 , UpperCamelCase__=3.6 ):
A__ : str = tokenizer
A__ : int = tokenizer.bos_token_id
A__ : List[Any] = dataset
A__ : Tuple = seq_length
A__ : Any = seq_length * chars_per_token * num_of_sequences
def __iter__( self ):
A__ : Dict = iter(self.dataset )
A__ : Tuple = True
while more_examples:
A__ , A__ : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCamelCase__ )['''content'''] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ : Dict = False
break
A__ : str = tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__ )['''input_ids''']
A__ : Optional[int] = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCamelCase__ ) , self.seq_length ):
A__ : Optional[int] = all_token_ids[i : i + self.seq_length]
if len(UpperCamelCase__ ) == self.seq_length:
yield torch.tensor(UpperCamelCase__ )
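# Packing sketch (illustrative token ids, not from the source): with
# concat_token_id = 0 and seq_length = 4, documents tokenized to [1, 2, 3] and
# [4, 5] become all_token_ids = [1, 2, 3, 0, 4, 5, 0]; one chunk [1, 2, 3, 0]
# is yielded and the short trailing remainder [4, 5, 0] is dropped.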
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Any:
"""simple docstring"""
A__ : Any = {'''streaming''': True}
A__ : List[str] = load_dataset(args.dataset_name , split='''train''' , **__UpperCamelCase )
A__ : List[str] = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length )
A__ : int = DataLoader(__UpperCamelCase , batch_size=args.batch_size )
return eval_dataloader
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
model.eval()
A__ : Dict = []
for step, batch in enumerate(__UpperCamelCase ):
with torch.no_grad():
A__ : Any = model(__UpperCamelCase , labels=__UpperCamelCase )
A__ : Tuple = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(__UpperCamelCase ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ : Tuple = torch.mean(torch.cat(__UpperCamelCase ) )
try:
A__ : Optional[Any] = torch.exp(__UpperCamelCase )
except OverflowError:
A__ : Union[str, Any] = float('''inf''' )
return loss.item(), perplexity.item()
# Setup Accelerator
_SCREAMING_SNAKE_CASE : List[Any] = Accelerator()
# Parse configuration
_SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(EvaluationArguments)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
set_seed(args.seed)
# Logging
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
_SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
_SCREAMING_SNAKE_CASE : Optional[Any] = create_dataloader(args)
# Prepare everything with our `accelerator`.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 55
| 0
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase__ :
'''simple docstring'''
_lowerCAmelCase = None
def __snake_case ( self ):
A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
A__ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
A__ : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
A__ : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __snake_case ( self ):
A__ : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
| 702
|
def solution() -> str:
    """simple docstring"""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
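A hedged check (the published Project Euler 48 answer, recalled rather than derived here):
assert solution() == "9110846700"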
| 55
| 0
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 2048-bit
1_4: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 3072-bit
1_5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 4096-bit
1_6: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 6144-bit
1_7: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
# 8192-bit
1_8: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=1_6,
),
'generator': 2,
},
}
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ = 14 ):
if group not in primes:
raise ValueError('''Unsupported Group''' )
A__ : List[str] = primes[group]['''prime''']
A__ : Tuple = primes[group]['''generator''']
A__ : Optional[int] = int(hexlify(urandom(32 ) ) , base=16 )
def __snake_case ( self ):
return hex(self.__private_key )[2:]
def __snake_case ( self ):
A__ : Optional[Any] = pow(self.generator , self.__private_key , self.prime )
return hex(UpperCamelCase__ )[2:]
def __snake_case ( self , UpperCamelCase__ ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(UpperCamelCase__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def __snake_case ( self , UpperCamelCase__ ):
A__ : str = int(UpperCamelCase__ , base=16 )
if not self.is_valid_public_key(UpperCamelCase__ ):
raise ValueError('''Invalid public key''' )
A__ : List[Any] = pow(UpperCamelCase__ , self.__private_key , self.prime )
return sha256(str(UpperCamelCase__ ).encode() ).hexdigest()
@staticmethod
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(UpperCamelCase__ , (prime - 1) // 2 , UpperCamelCase__ ) == 1
)
@staticmethod
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 14 ):
A__ : Optional[Any] = int(UpperCamelCase__ , base=16 )
A__ : Any = int(UpperCamelCase__ , base=16 )
A__ : int = primes[group]['''prime''']
if not DiffieHellman.is_valid_public_key_static(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('''Invalid public key''' )
A__ : Union[str, Any] = pow(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return sha256(str(UpperCamelCase__ ).encode() ).hexdigest()
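A hedged usage sketch (the method names are obscured above; generate_public_key and generate_shared_key follow the upstream naming): both parties derive the same shared SHA-256 digest.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
shared_a = alice.generate_shared_key(bob.generate_public_key())
shared_b = bob.generate_shared_key(alice.generate_public_key())
assert shared_a == shared_b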
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
A__ : Dict = inspect.getfile(accelerate.test_utils )
A__ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
A__ : Tuple = test_metrics
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __snake_case ( self ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __snake_case ( self ):
self.test_metrics.main()
@require_multi_gpu
def __snake_case ( self ):
print(F"Found {torch.cuda.device_count()} devices." )
A__ : int = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
| 55
| 0
|
class Node:
    '''simple docstring'''

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
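A hedged behavioral note (not in the source): equal keys fall into the final `else` branch of `insert` and are never re-inserted, so duplicates collapse.
assert tree_sort([2, 2, 1]) == [1, 2]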
if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 704
|
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
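A quick hedged check (not in the source): at x = mu the density reduces to 1/sqrt(2*pi*sigma**2), so the standard normal peaks at roughly 0.3989.
assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12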
| 55
| 0