| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 334 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 295 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
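# Quick illustrative check of the decomposition above (a minimal sketch; the
# matrix is an arbitrary example, not taken from this dataset):
#     matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#     lower, upper = lower_upper_decomposition(matrix)
#     assert np.allclose(lower @ upper, matrix)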
| 196 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm for the maximum contiguous subarray sum."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
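# For the sample input above this prints 6, the sum of the contiguous
# subarray [4, -1, 2, 1].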
| 295 | 0 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm.")

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
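# Example invocation (a sketch only; the script name and every path below are
# placeholders, not values taken from this file):
#   python convert_mobilevitv2.py --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2.pt --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./hf-mobilevitv2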
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
) | 97 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input_dict = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input_dict)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input_dict = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input_dict)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 295 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' ,['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' ,['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' ,[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] ,)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' ,[
('paws', None, ValueError),
] ,)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' ,[
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] ,)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' ,[
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] ,)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' ,[
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] ,)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' ,[
('paws', None, ValueError),
] ,)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 140 |
def counting_sort(collection):
    """Sort a collection of integers with counting sort (stable)."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
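# Counting sort runs in O(n + k) time and space, where k is the value range
# max(collection) - min(collection) + 1, and the sort is stable; e.g.
# counting_sort([0, 5, 3, 2, 2]) returns [0, 2, 2, 3, 5].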
| 295 | 0 |
def find_min(arr):
    """Return the minimum possible difference between the sums of a two-way partition of `arr`."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # a sum j is reachable without the i-th element...
            dp[i][j] = dp[i - 1][j]
            # ...or by adding the i-th element to a smaller reachable sum
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
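# Worked example: for arr = [1, 6, 11, 5] the total is 23 and the best
# reachable half-sum is 11 ({11} or {6, 5}), so find_min returns 23 - 2 * 11 = 1.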
| 181 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 295 | 0 |
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return 1 if the bit at `position` is set, else 0."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
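# Worked examples for the helpers above:
#   set_bit(0b1101, 1)    -> 0b1111 (15)
#   clear_bit(0b1111, 2)  -> 0b1011 (11)
#   flip_bit(0b1101, 1)   -> 0b1111 (15)
#   is_bit_set(0b1010, 3) -> True
#   get_bit(0b1010, 1)    -> 1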
| 229 |
def bfs(graph, s, t, parent):
    """Breadth-first search for an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Maximum flow from source to sink using BFS to find augmenting paths."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
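# The adjacency matrix above is the classic CLRS flow-network example; the
# BFS-based augmenting search (Edmonds-Karp) computes a maximum flow of 23.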
| 295 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all prime partitions of `number_to_partition`."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first value that has more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f'{solution() = }')
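# Project Euler 77 asks for the first value expressible as a sum of primes in
# over five thousand different ways; with the default argument, solution()
# returns 71.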
| 104 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text` (Knuth-Morris-Pratt)."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Longest-border table used by KMP to skip redundant comparisons."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
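    # The failure array holds, for each prefix of the pattern, the length of its
    # longest proper prefix that is also a suffix; the trailing 2 above reflects
    # that "aabaabaaa" ends with "aa", which is also how it begins.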
| 295 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE_:Any = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
SCREAMING_SNAKE_CASE_:Optional[int] = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
"""
SCREAMING_SNAKE_CASE_:Dict = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
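
# Sanity-check sketch (illustrative, not part of the original metric): for
# binary labels the MCC reduces to a confusion-matrix formula,
#   MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)),
# which is handy for verifying the sklearn-backed result by hand. The helper
# name below is hypothetical.
from math import sqrt


def _binary_mcc_sketch(tp: int, tn: int, fp: int, fn: int) -> float:
    denominator = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0


# A perfect prediction on a balanced sample gives +1:
assert _binary_mcc_sketch(tp=5, tn=5, fp=0, fn=0) == 1.0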
| 116 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy; heavy submodules load on first attribute access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
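
# Illustrative effect of the lazy module above (hypothetical session): importing
# a light symbol does not pull in the heavy modeling code, which is only loaded
# when one of its attributes is first accessed, e.g.
#   from transformers import GPTNeoXConfig  # cheap, no torch modeling import yet
#   from transformers import GPTNeoXModel   # triggers the real modeling import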
| 295 | 0 |
def prefix_function(input_string: str) -> list:
    """simple docstring"""
    # prefix_result[i] is the length of the longest proper prefix of
    # input_string[: i + 1] that is also a suffix of it
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """simple docstring"""
    return max(prefix_function(input_str))
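
# Quick illustrative check (values verified by hand against the code above):
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") -> 4  ("aabc" is both a proper prefix and a suffix)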
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix used to give XLNet/Transformer-XL models more context for short prompts
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ):
__lowercase= self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework )
__lowercase= prompt_text
if handle_long_generation == "hole":
__lowercase= inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowercase= generate_kwargs['max_new_tokens']
else:
__lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowercase= self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowercase= inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowercase= inputs['attention_mask'][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
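
# Illustrative usage of the pipeline above via the standard factory (the model
# name is just an example checkpoint):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I am", max_new_tokens=20)[0]["generated_text"]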
| 295 | 0 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    """simple docstring"""
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 134 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
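
# Illustrative round-trip through the autoencoder above (random weights, so the
# output is not a meaningful image; the point is the preserved shape):
#   import torch
#   model = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   reconstruction = model(x).sample  # same spatial shape as the input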
| 295 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
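
# Illustrative usage (the checkpoint id is only an example of a Karras-VE
# compatible UNet; any such checkpoint works):
#   model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=model, scheduler=KarrasVeScheduler())
#   image = pipe(num_inference_steps=50).images[0]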
| 106 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    # compare two initializers while ignoring their names
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
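
# Typical usage (illustrative path): rewrites the deduplicated model next to
# the original file and returns the new path.
#   optimized_path = remove_dup_initializers("./model.onnx")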
| 295 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, lm_score_boundary)
    def test_decoder_download_ignores_files(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wavaveca = processor_wavaveca(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wavaveca = processor_wavaveca.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 334 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
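
# Typical invocations (illustrative; the script filename is an assumption):
#   python check_build.py              # checks build/lib/transformers
#   python check_build.py --check_lib  # checks the installed package instead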
| 295 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
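
# Illustrative usage (standard from_pretrained API; the ids shown are what the
# bert-base-uncased vocabulary typically produces, included only as an example):
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tok("hello world")["input_ids"]  # [101, 7592, 2088, 102] -> [CLS] hello world [SEP]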
| 196 |
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int):
    """simple docstring"""
    # base case: nothing left to sort
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """simple docstring"""
    # base case: element already in the right spot
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
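
# Quick trace (illustrative): rec_insertion_sort([3, 1, 2], 3) leaves the last
# pair untouched (1 <= 2), then the recursive call bubbles 3 rightwards past
# 1 and 2:
#   data = [3, 1, 2]
#   rec_insertion_sort(data, len(data))
#   assert data == [1, 2, 3]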
| 295 | 0 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """simple docstring"""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        old_attribute = ""  # initialized so the first hasattr check below is well defined
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # these shape comparisons were previously bare expressions; they must be asserted
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
 | 97 |
def _lowerCamelCase( lowercase__ , lowercase__ = " " ) -> list:
'''simple docstring'''
__lowercase= []
__lowercase= 0
for index, char in enumerate(lowercase__ ):
if char == separator:
split_words.append(string[last_index:index] )
__lowercase= index + 1
elif index + 1 == len(lowercase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
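# Note (illustrative): unlike str.split, a trailing separator yields no trailing
# empty string here:
#     _lowerCamelCase("a b ")   ->  ['a', 'b']
#     "a b ".split(" ")         ->  ['a', 'b', '']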
| 295 | 0 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    '''simple docstring'''
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE, stripping the "first_stage_model." prefix so the
    # keys line up with a standalone VQModel
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]
    # extract state_dict for UNetLDM, with the same prefix-stripping
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='scaled_linear',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
_UpperCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
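# Illustrative follow-up (assumed output path): the saved pipeline can be reloaded
# and sampled with the standard diffusers API, e.g.
#     from diffusers import LDMPipeline
#     pipe = LDMPipeline.from_pretrained("./ldm-converted")
#     image = pipe(num_inference_steps=50).images[0]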
| 140 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'},
    )
    is_time: bool = field(
        default=False,
        metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={'help': 'Disable logarithmic scale when plotting'},
    )
    is_train: bool = field(
        default=False,
        metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None,
        metadata={'help': 'List of model names that are used instead of the ones in the csv file.'},
    )
def can_convert_to_int(value) -> bool:
    '''simple docstring'''
    try:
        int(value)
        return True
    except ValueError:
        return False


def can_convert_to_float(value) -> bool:
    '''simple docstring'''
    try:
        float(value)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline='') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]['bsz'].append(int(row['batch_size']))
                self.result_dict[model_name]['seq_len'].append(int(row['sequence_length']))
                if can_convert_to_int(row['result']):
                    # value is not None
                    self.result_dict[model_name]['result'][
                        (int(row['batch_size']), int(row['sequence_length']))
                    ] = int(row['result'])
                elif can_convert_to_float(row['result']):
                    # value is not None
                    self.result_dict[model_name]['result'][
                        (int(row['batch_size']), int(row['sequence_length']))
                    ] = float(row['result'])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log')
            ax.set_yscale('log')

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz']))
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len']))
            results = self.result_dict[model_name]['result']

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}'
                )
                plt.plot(x_axis_array, y_axis_array, '--')

            title_str += f' {label_model_name} vs.'

        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main() -> None:
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
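# Illustrative input (columns inferred from the reader above): the csv file holds
# one row per (model, batch_size, sequence_length) measurement, e.g.
#     model,batch_size,sequence_length,result
#     bert-base-uncased,8,128,1420
#     bert-base-uncased,8,512,4105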
| 295 | 0 |
'''simple docstring'''
import os
import sys
import unittest
UpperCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCamelCase__ = os.path.join(git_repo_path, '''src''', '''transformers''')
UpperCamelCase__ = '''
{0} = None
'''
UpperCamelCase__ = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
UpperCamelCase__ = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend(' if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')

        backend_with_underscore = find_backend(' if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')

        double_backend = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')

        double_backend_with_underscore = find_backend(
            ' if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')

        triple_backend = find_backend(
            ' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', "'torch'")
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object('FakeClass', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
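# Illustrative usage (repo layout assumed by these tests): the checker exercised
# here normally runs as part of repo-consistency checks, e.g.
#     python utils/check_dummies.py                      # verify dummy files are up to date
#     python utils/check_dummies.py --fix_and_overwrite  # regenerate them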
| 181 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)

DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])

CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f'There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.'
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs['input_ids']:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4):
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DPRReaderTokenizer
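# Illustrative usage (checkpoint names taken from the maps above): encode a question
# against passages and decode the best spans with the reader tokenizer.
#     from transformers import DPRReader, DPRReaderTokenizerFast
#     tokenizer = DPRReaderTokenizerFast.from_pretrained('facebook/dpr-reader-single-nq-base')
#     model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
#     encoded_inputs = tokenizer(
#         questions=['What is love ?'],
#         titles=['Haddaway'],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors='pt',
#     )
#     outputs = model(**encoded_inputs)
#     predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)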
| 295 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    '''simple docstring'''

    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="""pt""", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
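# Illustrative usage (assumed tool-calling convention of transformers agents):
#     translator = TranslationTool()
#     translator('¿Cómo estás?', src_lang='Spanish', tgt_lang='English')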
| 229 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
def test_add_and_remove_hooks(self):
__lowercase= ModelForTest()
__lowercase= ModelHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(test_model._hf_hook , lowerCAmelCase )
self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowerCAmelCase )
self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) )
self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) )
def test_append_and_remove_hooks(self):
__lowercase= ModelForTest()
__lowercase= ModelHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase )
self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowerCAmelCase )
self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) )
self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) )
def test_pre_forward_hook_is_executed(self):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(x + 1 )
__lowercase= test_model(x + 2 )
__lowercase= PreForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__lowercase= PreForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__lowercase= SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 )
def test_post_forward_hook_is_executed(self):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(lowerCAmelCase )
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__lowercase= SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 )
def test_no_grad_in_hook(self):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(lowerCAmelCase )
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__lowercase= True
__lowercase= test_model(lowerCAmelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def test_align_devices_as_model_parallelism(self):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) )
__lowercase= torch.randn(2 , 3 ).to(0 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , torch.device(0 ) )
def test_align_devices_as_cpu_offload(self):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
__lowercase= {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def test_attach_align_device_hook(self):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(lowerCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def test_attach_align_device_hook_with_weight_map(self):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(lowerCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
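# A minimal sketch (assumed typical usage) of what these tests exercise: CPU/meta
# offload with just-in-time placement on the execution device.
#     model = ModelForTest()
#     attach_align_device_hook(model, execution_device=0, offload=True)
#     output = model(torch.randn(2, 3))    # weights streamed to GPU 0 per forward
#     remove_hook_from_submodules(model)   # restore plain CPU weights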
| 295 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''instructblip_vision_model'''

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('''model_type''') == "instructblip":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''instructblip_qformer'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('''model_type''') == "instructblip":
            config_dict = config_dict['''qformer_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''instructblip'''
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''')

        if qformer_config is None:
            qformer_config = {}
            logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''')

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''')

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''qformer_config'''] = self.qformer_config.to_dict()
        output['''text_config'''] = self.text_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
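# Illustrative usage (assumed defaults): the composite config can be built from
# fresh sub-configs, mirroring `from_vision_qformer_text_configs` above.
#     vision_config = InstructBlipVisionConfig()
#     qformer_config = InstructBlipQFormerConfig()
#     text_config = CONFIG_MAPPING['opt']()
#     config = InstructBlipConfig.from_vision_qformer_text_configs(
#         vision_config, qformer_config, text_config
#     )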
| 104 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class A ( unittest.TestCase ):
def test_set_level(self):
__lowercase= logging.get_logger()
# the current default level is logging.WARNING
__lowercase= logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowerCAmelCase )
def test_integration(self):
__lowercase= logging.get_verbosity()
__lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )
__lowercase= 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning(lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning(lowerCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning(lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(lowerCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def test_env_override(self):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )
__lowercase= os.getenv('TRANSFORMERS_VERBOSITY' , lowerCAmelCase )
__lowercase= logging.log_levels[env_level_str]
__lowercase= logging.get_verbosity()
self.assertEqual(
lowerCAmelCase , lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
# restore to the original level
__lowercase= ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def test_env_invalid_override(self):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
__lowercase= logging.logging.getLogger()
with CaptureLogger(lowerCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def test_advisory_warnings(self):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
__lowercase= logging.get_logger('transformers.models.bart.tokenization_bart' )
__lowercase= 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning_advice(lowerCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowerCAmelCase ) as cl:
logger.warning_advice(lowerCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def test_set_progress_bar_enabled() -> None:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
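# Illustrative usage (shell syntax assumed) of the environment overrides tested above:
#     TRANSFORMERS_VERBOSITY=error python my_script.py          # silence warnings
#     TRANSFORMERS_NO_ADVISORY_WARNINGS=1 python my_script.py   # drop advisory warnings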
| 295 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    '''simple docstring'''

    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    '''simple docstring'''

    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """simple docstring"""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
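# A minimal usage sketch of the ring above (illustrative only, not part of
# the original module): because the tail points back to the head, deleting
# from the front repeatedly cycles through the elements in insertion order.
def _circular_list_demo() -> None:
    ring = CircularLinkedList()
    for value in (1, 2, 3):
        ring.insert_tail(value)
    assert str(ring) == "1->2->3"
    assert [ring.delete_front() for _ in range(3)] == [1, 2, 3]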
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = '''▁'''
lowerCAmelCase = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
lowerCAmelCase = {
'''google/pegasus-xsum''': 5_1_2,
}
lowerCAmelCase = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__(self , lowerCAmelCase , lowerCAmelCase="<pad>" , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<mask_2>" , lowerCAmelCase="<mask_1>" , lowerCAmelCase=None , lowerCAmelCase=1_0_3 , lowerCAmelCase = None , **lowerCAmelCase , ):
__lowercase= offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError(
f'additional_special_tokens should be of type {type(lowerCAmelCase )}, but is'
f' {type(lowerCAmelCase )}' )
__lowercase= (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(lowerCAmelCase ) , self.offset - 1 )
]
if len(set(lowerCAmelCase ) ) != len(lowerCAmelCase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
__lowercase= additional_special_tokens_extended
else:
__lowercase= [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
__lowercase= {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , mask_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token_sent=lowerCAmelCase , offset=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
__lowercase= mask_token_sent
__lowercase= vocab_file
__lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
# add special tokens to encoder dict
__lowercase= {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__lowercase= {v: k for k, v in self.encoder.items()}
@property
def _A (self ):
return len(self.sp_model ) + self.offset
def _A (self ):
__lowercase= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
__lowercase= self.__dict__.copy()
__lowercase= None
return state
def __setstate__(self , lowerCAmelCase ):
__lowercase= d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__lowercase= {}
__lowercase= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A (self , lowerCAmelCase ):
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def _A (self , lowerCAmelCase ):
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__lowercase= self.sp_model.piece_to_id(lowerCAmelCase )
return sp_id + self.offset
def _A (self , lowerCAmelCase ):
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__lowercase= self.sp_model.IdToPiece(index - self.offset )
return token
def _A (self , lowerCAmelCase ):
__lowercase= []
__lowercase= ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
__lowercase= []
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a)
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b) + [1]

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase= os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , 'wb' ) as fi:
__lowercase= self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
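# A minimal usage sketch (illustrative only; assumes the public `transformers`
# API rather than this module directly). Every encoded sequence is closed with
# </s>, the eos token, which this tokenizer pins to id 1 in its encoder dict.
def _pegasus_tokenizer_demo() -> None:
    from transformers import PegasusTokenizer

    tokenizer = PegasusTokenizer.from_pretrained('google/pegasus-xsum')
    encoded = tokenizer('PEGASUS was pretrained with gap sentence generation.')
    assert encoded['input_ids'][-1] == tokenizer.eos_token_id == 1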
| 295 | 0 |
ROMAN = [
(10_00, 'M'),
(9_00, 'CM'),
(5_00, 'D'),
(4_00, 'CD'),
(1_00, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    '''simple docstring'''
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    '''simple docstring'''
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= scope
__lowercase= self.vocab_size - 1
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowercase= ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= OpenAIGPTDoubleHeadsModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= OpenAIGPTForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Optional[Any] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCamelCase_ : Tuple =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCamelCase_ : List[str] =(
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
def _A (self ):
__lowercase= OpenAIGPTModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , n_embd=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase )
@slow
def _A (self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= OpenAIGPTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]] , dtype=torch.long , device=lowerCAmelCase ) # the president is
__lowercase= [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase )
| 295 | 0 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """simple docstring"""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """simple docstring"""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
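# Worked example (illustrative): 1406357289 is one of the 0-9 pandigitals
# from the problem statement; its substrings d2d3d4 .. d8d9d10 are divisible
# by 2, 3, 5, 7, 11, 13 and 17 respectively, so the predicate accepts it.
def _known_pandigital_demo() -> None:
    assert is_substring_divisible(tuple(int(c) for c in "1406357289"))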
if __name__ == "__main__":
print(F"""{solution() = }""")
| 134 |
from math import isqrt
def is_prime(number: int) -> bool:
    '''simple docstring'''
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    '''simple docstring'''
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
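# Sanity check (illustrative): the candidates generated above are exactly the
# differences of consecutive cubes, (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1,
# i.e. 7, 19, 37, 61, ...; the step between candidates is 6 * cube_index.
def _cube_difference_demo() -> None:
    diffs = [(n + 1) ** 3 - n**3 for n in range(1, 5)]
    assert diffs == [7, 19, 37, 61]
    assert all(is_prime(d) for d in diffs)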
if __name__ == "__main__":
print(F'{solution() = }')
| 295 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE ( A_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = ShapEImgaImgPipeline
lowercase__ = ['''image''']
lowercase__ = ['''image''']
lowercase__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowercase__ = False
@property
def __lowerCAmelCase ( self : Dict ):
return 3_2
@property
def __lowerCAmelCase ( self : Tuple ):
return 3_2
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Tuple ):
return 8
@property
def __lowerCAmelCase ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=6_4 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
lowerCAmelCase__ : Tuple = CLIPVisionModel(lowercase_ )
return model
@property
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Tuple = CLIPImageProcessor(
crop_size=2_2_4 ,do_center_crop=lowercase_ ,do_normalize=lowercase_ ,do_resize=lowercase_ ,image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] ,image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,resample=3 ,size=2_2_4 ,)
return image_processor
@property
def __lowerCAmelCase ( self : int ):
torch.manual_seed(0 )
lowerCAmelCase__ : int = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCAmelCase__ : Tuple = PriorTransformer(**lowercase_ )
return model
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase__ : List[Any] = ShapERenderer(**lowercase_ )
return model
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : int = self.dummy_prior
lowerCAmelCase__ : str = self.dummy_image_encoder
lowerCAmelCase__ : Union[str, Any] = self.dummy_image_processor
lowerCAmelCase__ : str = self.dummy_renderer
lowerCAmelCase__ : int = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_0_2_4 ,prediction_type='''sample''' ,use_karras_sigmas=lowercase_ ,clip_sample=lowercase_ ,clip_sample_range=1.0 ,)
lowerCAmelCase__ : Optional[int] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __lowerCAmelCase ( self : List[str] ,lowercase_ : List[Any] ,lowercase_ : Optional[int]=0 ):
lowerCAmelCase__ : int = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(lowercase_ ) ).to(lowercase_ )
if str(lowercase_ ).startswith('''mps''' ):
lowerCAmelCase__ : Tuple = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase__ : int = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase__ : str = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : Optional[Any] = '''cpu'''
lowerCAmelCase__ : str = self.get_dummy_components()
lowerCAmelCase__ : Optional[Any] = self.pipeline_class(**lowercase_ )
lowerCAmelCase__ : Union[str, Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : Tuple = pipe(**self.get_dummy_inputs(lowercase_ ) )
lowerCAmelCase__ : Any = output.images[0]
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
lowerCAmelCase__ : Tuple = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Optional[Any] = torch_device == '''cpu'''
lowerCAmelCase__ : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=lowercase_ ,relax_max_difference=lowercase_ ,)
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = self.pipeline_class(**lowercase_ )
lowerCAmelCase__ : Any = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : str = 2
lowerCAmelCase__ : str = self.get_dummy_inputs(lowercase_ )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase__ : List[Any] = batch_size * [inputs[key]]
lowerCAmelCase__ : List[str] = pipe(**lowercase_ ,num_images_per_prompt=lowercase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCAmelCase__ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCAmelCase__ : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCAmelCase__ : Dict = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : int = torch.Generator(device=lowercase_ ).manual_seed(0 )
lowerCAmelCase__ : Any = pipe(
lowercase_ ,generator=lowercase_ ,guidance_scale=3.0 ,num_inference_steps=6_4 ,frame_size=6_4 ,output_type='''np''' ,).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(lowercase_ ,lowercase_ )
| 106 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
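# Example (illustrative): factors come back in non-decreasing order with
# multiplicity, so their product reconstructs the input.
def _prime_factors_demo() -> None:
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
    assert prime_factors(97) == [97]  # a prime factors as itself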
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class a_ :
"""simple docstring"""
def __init__( self : int ,snake_case : Optional[Any] ,snake_case : List[Any] = 13 ,snake_case : Dict = 64 ,snake_case : str = 2 ,snake_case : Tuple = 3 ,snake_case : Any = 3 ,snake_case : str = True ,snake_case : Optional[Any] = True ,snake_case : str = 128 ,snake_case : Optional[int]=[16, 32, 64, 128] ,snake_case : Optional[Any] = 7 ,snake_case : Optional[Any] = 4 ,snake_case : List[Any] = 37 ,snake_case : List[str] = "gelu" ,snake_case : Optional[Any] = 0.1 ,snake_case : Tuple = 0.1 ,snake_case : str = 10 ,snake_case : List[Any] = 0.02 ,snake_case : int = 2 ,snake_case : int = 1 ,snake_case : List[str] = 128 ,snake_case : int = [2, 2, 2, 2] ,snake_case : Optional[int] = 2 ,snake_case : Tuple = 2 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =type_sequence_label_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =encoder_stride
SCREAMING_SNAKE_CASE =num_attention_outputs
SCREAMING_SNAKE_CASE =embed_dim
SCREAMING_SNAKE_CASE =embed_dim + 1
SCREAMING_SNAKE_CASE =resolution
SCREAMING_SNAKE_CASE =depths
SCREAMING_SNAKE_CASE =hidden_sizes
SCREAMING_SNAKE_CASE =dim
SCREAMING_SNAKE_CASE =mlp_expansion_ratio
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Any ):
return EfficientFormerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=snake_case ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,resolution=self.resolution ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,dim=self.dim ,mlp_expansion_ratio=self.mlp_expansion_ratio ,)
def _lowerCAmelCase ( self : Tuple ,snake_case : Tuple ,snake_case : Union[str, Any] ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =TFEfficientFormerModel(config=snake_case )
SCREAMING_SNAKE_CASE =model(snake_case ,training=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Any ,snake_case : Optional[Any] ,snake_case : str ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =self.type_sequence_label_size
SCREAMING_SNAKE_CASE =TFEfficientFormerForImageClassification(snake_case )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case ,training=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE =1
SCREAMING_SNAKE_CASE =TFEfficientFormerForImageClassification(snake_case )
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =model(snake_case ,labels=snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =TFEfficientFormerModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(
self ,config_class=snake_case ,has_text_modality=snake_case ,hidden_size=37 )
def _lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def _lowerCAmelCase ( self : Optional[Any] ):
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def _lowerCAmelCase ( self : Any ):
pass
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['pixel_values']
self.assertListEqual(arg_names[:1] ,snake_case )
def _lowerCAmelCase ( self : Tuple ):
def check_hidden_states_output(snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(snake_case ,snake_case ) ,training=snake_case )
SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE =getattr(
self.model_tester ,'expected_num_hidden_layers' ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case ) ,snake_case )
if hasattr(self.model_tester ,'encoder_seq_length' ):
SCREAMING_SNAKE_CASE =self.model_tester.encoder_seq_length
if hasattr(self.model_tester ,'chunk_length' ) and self.model_tester.chunk_length > 1:
SCREAMING_SNAKE_CASE =seq_length * self.model_tester.chunk_length
else:
SCREAMING_SNAKE_CASE =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
if config.is_encoder_decoder:
SCREAMING_SNAKE_CASE =outputs.decoder_hidden_states
                self.assertIsInstance(snake_case ,(list, tuple) )
self.assertEqual(len(snake_case ) ,snake_case )
SCREAMING_SNAKE_CASE =getattr(self.model_tester ,'seq_length' ,snake_case )
SCREAMING_SNAKE_CASE =getattr(self.model_tester ,'decoder_seq_length' ,snake_case )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[decoder_seq_length, self.model_tester.hidden_size] ,)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =True
check_hidden_states_output(snake_case ,snake_case ,snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE =True
check_hidden_states_output(snake_case ,snake_case ,snake_case )
def _lowerCAmelCase ( self : Any ,snake_case : List[str] ,snake_case : Tuple ,snake_case : int=False ):
SCREAMING_SNAKE_CASE =super()._prepare_for_class(snake_case ,snake_case ,return_labels=snake_case )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case )
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def _lowerCAmelCase ( self : Optional[Any] ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =TFEfficientFormerModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =getattr(self.model_tester ,'seq_length' ,snake_case )
SCREAMING_SNAKE_CASE =getattr(self.model_tester ,'encoder_seq_length' ,snake_case )
SCREAMING_SNAKE_CASE =getattr(self.model_tester ,'key_length' ,snake_case )
SCREAMING_SNAKE_CASE =getattr(self.model_tester ,'chunk_length' ,snake_case )
if chunk_length is not None and hasattr(self.model_tester ,'num_hashes' ):
SCREAMING_SNAKE_CASE =encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(snake_case ,snake_case ) ,training=snake_case )
SCREAMING_SNAKE_CASE =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case ) ,self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =model_class(snake_case )
SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(snake_case ,snake_case ) ,training=snake_case )
SCREAMING_SNAKE_CASE =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case ) ,self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] ,)
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] ,)
def _lowerCAmelCase ( self : Any ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
SCREAMING_SNAKE_CASE =model_class(snake_case )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
SCREAMING_SNAKE_CASE ={
key: tf.keras.Input(shape=val.shape[1:] ,dtype=val.dtype ,name=snake_case )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
SCREAMING_SNAKE_CASE =model(snake_case )
self.assertTrue(outputs_dict is not None )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Optional[Any] ):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE =model(**snake_case ,training=snake_case )
# verify the logits
SCREAMING_SNAKE_CASE =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,snake_case ,atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=snake_case ,return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE =model(**snake_case ,training=snake_case )
# verify the logits
SCREAMING_SNAKE_CASE =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,snake_case )
SCREAMING_SNAKE_CASE =tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,snake_case ,atol=1e-4 ) )
| 334 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowerCAmelCase = None
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase = {
'''t5-small''': 5_1_2,
'''t5-base''': 5_1_2,
'''t5-large''': 5_1_2,
'''t5-3b''': 5_1_2,
'''t5-11b''': 5_1_2,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=100, additional_special_tokens=None, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens')
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.', FutureWarning, )
        return max_model_length
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowercase= os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ):
copyfile(self.vocab_file , lowerCAmelCase )
logger.info(f'Copy vocab file to {out_vocab_file}' )
return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_b + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r'<extra_id_\d+>', token)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
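# A minimal usage sketch (illustrative only; assumes the public `transformers`
# API and the default of 100 extra ids): the extra ids double as the sentinel
# tokens used for T5's span-corruption pretraining objective.
def _ta_sentinel_demo() -> None:
    from transformers import T5TokenizerFast

    tokenizer = T5TokenizerFast.from_pretrained('t5-small')
    sentinels = tokenizer.get_sentinel_tokens()
    assert '<extra_id_0>' in sentinels and len(sentinels) == 100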
| 295 | 0 |
from ...processing_utils import ProcessorMixin
class __a ( A_ ):
__lowercase : Optional[Any] = '''WhisperFeatureExtractor'''
__lowercase : List[Any] = '''WhisperTokenizer'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: Any = self.feature_extractor
lowercase__: Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase__ , language=lowerCAmelCase__ , no_timestamps=lowerCAmelCase__ )
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase__: Any = kwargs.pop('audio' , lowerCAmelCase__ )
lowercase__: List[str] = kwargs.pop('sampling_rate' , lowerCAmelCase__ )
lowercase__: Optional[Any] = kwargs.pop('text' , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowercase__: Optional[int] = args[0]
lowercase__: Tuple = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowercase__: int = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None:
lowercase__: Optional[int] = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
            inputs['labels'] = encodings['input_ids']
return inputs
def SCREAMING_SNAKE_CASE__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__="np" ) -> Any:
'''simple docstring'''
return self.tokenizer.get_prompt_ids(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
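# A minimal usage sketch (illustrative only; assumes the public `transformers`
# API and the `openai/whisper-tiny` checkpoint, which this file does not name):
# audio is routed to the feature extractor, text to the tokenizer, and the
# token ids are exposed under 'labels' for the decoder.
def _whisper_processor_demo() -> None:
    import numpy as np
    from transformers import WhisperProcessor

    processor = WhisperProcessor.from_pretrained('openai/whisper-tiny')
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
    inputs = processor(audio=audio, sampling_rate=16_000, text='hello')
    assert 'input_features' in inputs and 'labels' in inputs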
| 196 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
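# Edge-case example (illustrative): on an all-negative array the classic
# Kadane answer is the largest single element, while allow_empty_subarrays
# admits the empty subarray and returns 0 instead.
def _max_subarray_demo() -> None:
    assert max_subarray_sum([-8, -3, -6]) == -3
    assert max_subarray_sum([-8, -3, -6], allow_empty_subarrays=True) == 0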
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
| 295 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowercase ( unittest.TestCase , A_ ):
"""simple docstring"""
    def setUp(self):
        '''simple docstring'''
        self.tool = load_tool('text-to-speech')
        self.tool.setup()

    def test_exact_match_arg(self):
        '''simple docstring'''
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ))

    def test_exact_match_kwarg(self):
        '''simple docstring'''
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ))
| 97 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class A ( A_ , unittest.TestCase ):
UpperCamelCase_ : Any =PriorTransformer
UpperCamelCase_ : List[str] ='''hidden_states'''
@property
def _A (self ):
__lowercase= 4
__lowercase= 8
__lowercase= 7
__lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _A (self , lowerCAmelCase=0 ):
torch.manual_seed(lowerCAmelCase )
__lowercase= 4
__lowercase= 8
__lowercase= 7
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _A (self ):
return (4, 8)
@property
def _A (self ):
return (4, 8)
def _A (self ):
__lowercase= {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
__lowercase= self.dummy_input
return init_dict, inputs_dict
def _A (self ):
        model, loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
__lowercase= model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _A (self ):
__lowercase, __lowercase= self.prepare_init_args_and_inputs_for_common()
__lowercase= self.model_class(**lowerCAmelCase )
__lowercase= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase= [*signature.parameters.keys()]
__lowercase= ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowerCAmelCase )
def _A (self ):
__lowercase= PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
__lowercase= model.to(lowerCAmelCase )
if hasattr(lowerCAmelCase , 'set_default_attn_processor' ):
model.set_default_attn_processor()
__lowercase= self.get_dummy_seed_input()
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )[0]
__lowercase= output[0, :5].flatten().cpu()
print(lowerCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
__lowercase= torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) )
@slow
class A ( unittest.TestCase ):
def _A (self , lowerCAmelCase=1 , lowerCAmelCase=7_6_8 , lowerCAmelCase=7_7 , lowerCAmelCase=0 ):
torch.manual_seed(lowerCAmelCase )
__lowercase= batch_size
__lowercase= embedding_dim
__lowercase= num_embeddings
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase )
__lowercase= torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _A (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
__lowercase= PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(lowerCAmelCase )
__lowercase= self.get_dummy_seed_input(seed=lowerCAmelCase )
with torch.no_grad():
__lowercase= model(**lowerCAmelCase )[0]
assert list(sample.shape ) == [1, 7_6_8]
__lowercase= sample[0, :8].flatten().cpu()
print(lowerCAmelCase )
__lowercase= torch.tensor(lowerCAmelCase )
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
| 295 | 0 |
import string
def atbash_slow(sequence: str) -> str:
    '''simple docstring'''
    output = ''
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    '''simple docstring'''
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)
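# A quick hand-checked sanity check of the cipher above: Atbash mirrors each
# letter in the alphabet ('A' <-> 'Z', 'b' <-> 'y') and passes everything
# else through, so atbash('ABC') == 'ZYX' and atbash('abc123') == 'zyx123'.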
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    print('Running performance benchmarks...')
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(f'''> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds''')
    print(f'''> atbash(): {timeit('atbash(printable)', setup=setup)} seconds''')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 140 |
def counting_sort(collection: list) -> list:
    '''simple docstring'''
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
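# A hand-worked trace of the function above (illustrative, not from the
# original file): counting_sort([3, 1, 2, 1]) builds counts [2, 1, 1] for the
# values 1..3, prefix-sums them to [2, 3, 4], and the reversed placement pass
# then writes the stable result [1, 1, 2, 3].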
def counting_sort_string(string: str) -> str:
    '''simple docstring'''
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
| 295 | 0 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
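# A hand-worked trace (illustrative, not from the original file):
# pigeon_sort([8, 3, 2, 7]) gives _min = 2 and holes_range = 7; each value
# lands in hole value - _min along with its repeat count, and the final
# sweep writes back [2, 3, 7, 8].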
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n')
    unsorted = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 181 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class A ( A_ ):
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_mask
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= vocab_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= intermediate_size
__lowercase= hidden_act
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= scope
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_input_mask:
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A (self ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DistilBertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DistilBertForMaskedLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= DistilBertForQuestionAnswering(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= DistilBertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= DistilBertForTokenClassification(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_choices
__lowercase= DistilBertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
((__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase), (__lowercase))= config_and_inputs
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : Any =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ : Optional[int] =(
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : str =True
UpperCamelCase_ : str =True
UpperCamelCase_ : Union[str, Any] =True
UpperCamelCase_ : Optional[int] =True
def _A (self ):
__lowercase= DistilBertModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase )
@slow
def _A (self ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= DistilBertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
def _A (self ):
__lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__lowercase= True
__lowercase= model_class(config=lowerCAmelCase )
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
__lowercase= torch.jit.trace(
lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase , os.path.join(lowerCAmelCase , 'traced_model.pt' ) )
__lowercase= torch.jit.load(os.path.join(lowerCAmelCase , 'traced_model.pt' ) , map_location=lowerCAmelCase )
loaded(inputs_dict['input_ids'].to(lowerCAmelCase ) , inputs_dict['attention_mask'].to(lowerCAmelCase ) )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= DistilBertModel.from_pretrained('distilbert-base-uncased' )
__lowercase= torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowercase= torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
__lowercase= torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , lowerCAmelCase )
__lowercase= torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 ) )
| 295 | 0 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    '''simple docstring'''
    width, height = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(width):
        for j in range(height):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
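# Pixel-level example (hand-checked, not from the original file): a pure blue
# BGR pixel [255, 0, 0] becomes [0, 255, 255]; every channel value v is
# replaced by 255 - v.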
if __name__ == "__main__":
# read original image
    img = imread('image_data/lena.jpg', 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 229 |
def bfs(graph, s, t, parent) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink) -> int:
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
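# For the classic capacity matrix below (hand-checked), the BFS keeps finding
# augmenting paths such as 0 -> 1 -> 3 -> 5 and 0 -> 2 -> 4 -> 5 and saturates
# them until no source-to-sink path remains; the printed maximum flow for
# this graph is 23.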
graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 295 | 0 |
'''simple docstring'''
def perfect(number):
    """simple docstring"""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
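# Hand-checked examples: perfect(6) is True because 1 + 2 + 3 == 6, while
# perfect(8) is False because its proper divisors sum to 1 + 2 + 4 == 7.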
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('Enter number: ').strip())
print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 104 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    '''simple docstring'''
    # 1) Construct the failure array
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    '''simple docstring'''
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
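# Failure-array intuition (hand-worked, not from the original file): for the
# pattern 'AAAB' the array is [0, 1, 2, 0]; after matching 'AAA' and then
# mismatching on 'B', the search resumes from the longest proper prefix that
# is also a suffix instead of restarting from scratch.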
if __name__ == "__main__":
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 295 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
return self.get_dummy_input()
@property
def _lowerCAmelCase ( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def _lowerCAmelCase ( self, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=False, lowerCamelCase__=False, ):
A : int = 4
A : Tuple = 32
A : str = (32, 32)
A : str = torch.manual_seed(0 )
A : List[Any] = torch.device(lowerCamelCase__ )
A : List[str] = (batch_size, num_channels) + sizes
A : Union[str, Any] = randn_tensor(lowerCamelCase__, generator=lowerCamelCase__, device=lowerCamelCase__ )
A : Optional[Any] = {"""hidden_states""": hidden_states}
if include_temb:
A : Tuple = 128
A : List[Any] = randn_tensor((batch_size, temb_channels), generator=lowerCamelCase__, device=lowerCamelCase__ )
if include_res_hidden_states_tuple:
A : Dict = torch.manual_seed(1 )
A : Dict = (randn_tensor(lowerCamelCase__, generator=lowerCamelCase__, device=lowerCamelCase__ ),)
if include_encoder_hidden_states:
A : Union[str, Any] = floats_tensor((batch_size, 32, 32) ).to(lowerCamelCase__ )
if include_skip_sample:
A : int = randn_tensor(((batch_size, 3) + sizes), generator=lowerCamelCase__, device=lowerCamelCase__ )
return dummy_input
def _lowerCAmelCase ( self ):
A : Any = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
A : List[str] = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
A : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self, lowerCamelCase__ ):
A , A : str = self.prepare_init_args_and_inputs_for_common()
A : Optional[int] = self.block_class(**lowerCamelCase__ )
unet_block.to(lowerCamelCase__ )
unet_block.eval()
with torch.no_grad():
A : str = unet_block(**lowerCamelCase__ )
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : List[str] = output[0]
self.assertEqual(output.shape, self.output_shape )
A : int = output[0, -1, -3:, -3:]
A : Dict = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
assert torch_all_close(output_slice.flatten(), lowerCamelCase__, atol=5e-3 )
@unittest.skipIf(torch_device == """mps""", """Training is not supported in mps""" )
def _lowerCAmelCase ( self ):
A , A : List[Any] = self.prepare_init_args_and_inputs_for_common()
A : List[str] = self.block_class(**lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
A : List[str] = model(**lowerCamelCase__ )
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Union[str, Any] = output[0]
A : Dict = torch.device(lowerCamelCase__ )
A : Dict = randn_tensor(output.shape, device=lowerCamelCase__ )
A : Optional[Any] = torch.nn.functional.mse_loss(lowerCamelCase__, lowerCamelCase__ )
loss.backward()
| 116 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 295 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def covid_stats(url: str = 'https://www.worldometers.info/coronavirus/') -> covid_data:
    '''simple docstring'''
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 133 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A ( enum.Enum ):
UpperCamelCase_ : Optional[int] =0
UpperCamelCase_ : Tuple =1
UpperCamelCase_ : Optional[int] =2
@add_end_docstrings(A_ )
class A ( A_ ):
UpperCamelCase_ : Union[str, Any] ='''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__(self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowercase= None
if self.model.config.prefix is not None:
__lowercase= self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowercase= self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params )
__lowercase= {**self._preprocess_params, **preprocess_params}
__lowercase= {**self._forward_params, **forward_params}
def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ):
__lowercase= {}
if prefix is not None:
__lowercase= prefix
if prefix:
__lowercase= self.tokenizer(
lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework )
__lowercase= prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
' [None, \'hole\']' )
__lowercase= handle_long_generation
preprocess_params.update(lowerCAmelCase )
__lowercase= generate_kwargs
__lowercase= {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowercase= ReturnType.TENSORS
if return_type is not None:
__lowercase= return_type
if clean_up_tokenization_spaces is not None:
__lowercase= clean_up_tokenization_spaces
if stop_sequence is not None:
__lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
if len(lowerCAmelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowercase= stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase )
def __call__(self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ):
__lowercase= self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework )
__lowercase= prompt_text
if handle_long_generation == "hole":
__lowercase= inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowercase= generate_kwargs['max_new_tokens']
else:
__lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowercase= self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowercase= inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowercase= inputs['attention_mask'][:, -keep_length:]
return inputs
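    # A short note on the 'hole' strategy above (restating the existing logic,
    # nothing added): when the prompt plus the requested new tokens would
    # overflow the model window, only the trailing
    # `model_max_length - new_tokens` prompt tokens (and their attention mask)
    # are kept so generation still fits; impossible budgets raise instead of
    # silently truncating.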
def _A (self , lowerCAmelCase , **lowerCAmelCase ):
__lowercase= model_inputs['input_ids']
__lowercase= model_inputs.get('attention_mask' , lowerCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowercase= None
__lowercase= None
__lowercase= 1
else:
__lowercase= input_ids.shape[0]
__lowercase= model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowercase= generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowercase= 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowercase= 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase )
__lowercase= generated_sequence.shape[0]
if self.framework == "pt":
__lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ):
__lowercase= model_outputs['generated_sequence'][0]
__lowercase= model_outputs['input_ids']
__lowercase= model_outputs['prompt_text']
__lowercase= generated_sequence.numpy().tolist()
__lowercase= []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowercase= {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowercase= self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowercase= 0
else:
__lowercase= len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) )
if return_type == ReturnType.FULL_TEXT:
__lowercase= prompt_text + text[prompt_length:]
else:
__lowercase= text[prompt_length:]
__lowercase= {'generated_text': all_text}
records.append(lowerCAmelCase )
return records
| 295 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__snake_case : Union[str, Any] = 50_0000
__snake_case , __snake_case : List[str] = os.path.split(__file__)
__snake_case : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def __lowerCamelCase ( __snake_case : Dict, **__snake_case : int ) -> List[Any]:
"""simple docstring"""
A__ : Tuple =dataset.map(**lowercase__ )
@get_duration
def __lowerCamelCase ( __snake_case : str, **__snake_case : int ) -> int:
"""simple docstring"""
A__ : List[Any] =dataset.filter(**lowercase__ )
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ : Optional[Any] ={"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict =datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
A__ : Any =generate_example_dataset(
os.path.join(lowercase__, """dataset.arrow""" ), lowercase__, num_examples=lowercase__ )
A__ : Dict =transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=lowercase__ )
def tokenize(__snake_case : Optional[int] ):
return tokenizer(examples["""text"""] )
A__ : List[str] =map(lowercase__ )
A__ : Union[str, Any] =map(lowercase__, batched=lowercase__ )
A__ : int =map(lowercase__, function=lambda __snake_case : None, batched=lowercase__ )
with dataset.formatted_as(type="""numpy""" ):
A__ : str =map(lowercase__, function=lambda __snake_case : None, batched=lowercase__ )
with dataset.formatted_as(type="""pandas""" ):
A__ : Dict =map(lowercase__, function=lambda __snake_case : None, batched=lowercase__ )
with dataset.formatted_as(type="""torch""", columns="""numbers""" ):
A__ : Optional[Any] =map(lowercase__, function=lambda __snake_case : None, batched=lowercase__ )
with dataset.formatted_as(type="""tensorflow""", columns="""numbers""" ):
A__ : Union[str, Any] =map(lowercase__, function=lambda __snake_case : None, batched=lowercase__ )
A__ : Optional[int] =map(lowercase__, function=lowercase__, batched=lowercase__ )
A__ : Union[str, Any] =filter(lowercase__ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase__, """wb""" ) as f:
f.write(json.dumps(lowercase__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 134 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
@register_to_config
def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = ("DownEncoderBlock2D",) , lowerCAmelCase = ("UpDecoderBlock2D",) , lowerCAmelCase = (6_4,) , lowerCAmelCase = 1 , lowerCAmelCase = "silu" , lowerCAmelCase = 3 , lowerCAmelCase = 3_2 , lowerCAmelCase = 2_5_6 , lowerCAmelCase = 3_2 , lowerCAmelCase = None , lowerCAmelCase = 0.1_82_15 , lowerCAmelCase = "group" , ):
super().__init__()
# pass init params to Encoder
__lowercase= Encoder(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , down_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , double_z=lowerCAmelCase , )
__lowercase= vq_embed_dim if vq_embed_dim is not None else latent_channels
        __lowercase= nn.Conv2d(lowerCAmelCase , lowerCAmelCase , 1 )
__lowercase= VectorQuantizer(lowerCAmelCase , lowerCAmelCase , beta=0.25 , remap=lowerCAmelCase , sane_index_shape=lowerCAmelCase )
        __lowercase= nn.Conv2d(lowerCAmelCase , lowerCAmelCase , 1 )
# pass init params to Decoder
__lowercase= Decoder(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , up_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , norm_type=lowerCAmelCase , )
@apply_forward_hook
def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
__lowercase= self.encoder(lowerCAmelCase )
__lowercase= self.quant_conv(lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase )
@apply_forward_hook
def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ):
# also go through quantization layer
if not force_not_quantize:
__lowercase, __lowercase, __lowercase= self.quantize(lowerCAmelCase )
else:
__lowercase= h
__lowercase= self.post_quant_conv(lowerCAmelCase )
__lowercase= self.decoder(lowerCAmelCase , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
__lowercase= sample
__lowercase= self.encode(lowerCAmelCase ).latents
__lowercase= self.decode(lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase )
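    # A minimal usage sketch (shapes and argument values assumed for
    # illustration, not taken from this file):
    #   vq = VQModel()
    #   latents = vq.encode(torch.randn(1, 3, 32, 32)).latents
    #   recon = vq.decode(latents).sample
    # `decode(..., force_not_quantize=True)` skips the codebook lookup and
    # runs `post_quant_conv` plus the decoder on the raw latents instead.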
| 295 | 0 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__UpperCamelCase : Optional[Any] = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Any = list(s_dict.keys() )
for key in keys:
lowerCAmelCase__ : List[str] = r'''.*/layers_(\d+)'''
lowerCAmelCase__ : Union[str, Any] = key
if re.match(lowercase__ , lowercase__ ):
lowerCAmelCase__ : Union[str, Any] = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , lowercase__ )
lowerCAmelCase__ : str = r'''(encoder|decoder)\/'''
if re.match(lowercase__ , lowercase__ ):
lowerCAmelCase__ : List[str] = re.match(lowercase__ , lowercase__ ).groups()
if groups[0] == "encoder":
lowerCAmelCase__ : Dict = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , lowercase__ )
lowerCAmelCase__ : List[str] = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , lowercase__ )
elif groups[0] == "decoder":
lowerCAmelCase__ : Union[str, Any] = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , lowercase__ )
lowerCAmelCase__ : Optional[Any] = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , lowercase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCAmelCase__ : Union[str, Any] = new_key.replace(lowercase__ , lowercase__ )
print(f'{key} -> {new_key}' )
lowerCAmelCase__ : int = s_dict.pop(lowercase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase__ : Dict = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase__ : Optional[int] = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_key = key.replace('expert/', f'experts/expert_{idx}/')
                s_dict[new_key] = expert_weights[idx]
                print(f'{key} -> {new_key}')
            s_dict.pop(key)
return s_dict
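# A hand-traced example of the renaming above (the key name is assumed for
# illustration): 'encoder/layers_0/attention/key/kernel' first becomes
# 'encoder/block/0/layer/attention/key/kernel' via the layers_(\d+) rewrite,
# and MOE_LAYER_NAME_MAPPING then turns it into
# 'encoder/block/0/layer/0/SelfAttention/k/kernel'.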
__UpperCamelCase : Optional[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
import regex as re
with open(lowercase__ , '''r''' ) as f:
lowerCAmelCase__ : str = f.read()
lowerCAmelCase__ : Optional[int] = re.findall(r'''(.*) = ([0-9.]*)''' , lowercase__ )
lowerCAmelCase__ : List[str] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCAmelCase__ : int = float(lowercase__ ) if '''.''' in value else int(lowercase__ )
lowerCAmelCase__ : List[Any] = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , lowercase__ )[0]
lowerCAmelCase__ : Any = str(activation[1] )
lowerCAmelCase__ : Dict = num_experts
lowerCAmelCase__ : Optional[Any] = SwitchTransformersConfig(**lowercase__ )
return config
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_=None , A_="./" , A_=8 ):
print(f'Loading flax weights from : {flax_checkpoint_path}' )
    lowerCAmelCase__ : Tuple = checkpoints.load_t5x_checkpoint(lowercase__ )
if gin_file is not None:
lowerCAmelCase__ : Dict = convert_gin_to_config(lowercase__ , lowercase__ )
else:
lowerCAmelCase__ : Dict = SwitchTransformersConfig.from_pretrained(lowercase__ )
lowerCAmelCase__ : List[Any] = SwitchTransformersForConditionalGeneration(lowercase__ )
lowerCAmelCase__ : List[Any] = flax_params['''target''']
lowerCAmelCase__ : List[str] = flatten_dict(lowercase__ , sep='''/''' )
lowerCAmelCase__ : Dict = rename_keys(lowercase__ )
lowerCAmelCase__ : Any = unflatten_dict(lowercase__ , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase__ , lowercase__ )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 106 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b) -> bool:
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name) -> None:
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name) -> None:
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace) -> None:
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path) -> str:
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
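                # TensorProto data_type codes used below (per the ONNX spec):
                # 1 = FLOAT and 6 = INT32 are 4 bytes per element,
                # 7 = INT64 and 11 = DOUBLE are 8 bytes per element.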
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
| 295 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
_lowerCamelCase ={
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
_lowerCamelCase ={
"abeja/gpt-neox-japanese-2.7b": 20_48,
}
def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ ):
"""simple docstring"""
with open(lowercase__, 'r', encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE =json.loads(f.read() )
SCREAMING_SNAKE_CASE =collections.OrderedDict()
SCREAMING_SNAKE_CASE =collections.OrderedDict()
SCREAMING_SNAKE_CASE =collections.OrderedDict()
with open(lowercase__, 'r', encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE =f.readlines()
SCREAMING_SNAKE_CASE =[[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(lowercase__ ):
SCREAMING_SNAKE_CASE =b
SCREAMING_SNAKE_CASE =idx
for wd in b:
SCREAMING_SNAKE_CASE =idx
return vocab, raw_vocab, ids_to_tokens, emoji
class a_ ( A_ ):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] ,snake_case : Optional[Any] ,snake_case : Union[str, Any] ,snake_case : str="<|endoftext|>" ,snake_case : Union[str, Any]="<|endoftext|>" ,snake_case : str="<|startoftext|>" ,snake_case : str="<|endoftext|>" ,snake_case : Union[str, Any]=False ,**snake_case : Optional[int] ,):
super().__init__(
unk_token=snake_case ,pad_token=snake_case ,bos_token=snake_case ,eos_token=snake_case ,do_clean_text=snake_case ,**snake_case ,)
if not os.path.isfile(snake_case ):
raise ValueError(
f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
if not os.path.isfile(snake_case ):
raise ValueError(
f'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
SCREAMING_SNAKE_CASE =do_clean_text
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =load_vocab_and_emoji(snake_case ,snake_case )
SCREAMING_SNAKE_CASE =SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def _lowerCAmelCase ( self : int ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def _lowerCAmelCase ( self : Tuple ):
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def _lowerCAmelCase ( self : Any ,snake_case : Union[str, Any] ):
return self.subword_tokenizer.tokenize(snake_case ,clean=self.do_clean_text )
def _lowerCAmelCase ( self : List[str] ,snake_case : str ):
return self.vocab.get(snake_case ,self.vocab.get(self.unk_token ) )
def _lowerCAmelCase ( self : Any ,snake_case : Tuple ):
return self.subword_tokenizer.convert_id_to_token(snake_case )
def _lowerCAmelCase ( self : List[str] ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =''.join(snake_case ).strip()
return out_string
def _lowerCAmelCase ( self : Any ,snake_case : Union[str, Any] ):
SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case ,add_special_tokens=snake_case ) + [self.eos_token_id] )
if len(snake_case ) > self.model_max_length:
SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self : List[str] ,snake_case : List[str] ,snake_case : str = None ):
SCREAMING_SNAKE_CASE =0
if os.path.isdir(snake_case ):
SCREAMING_SNAKE_CASE =os.path.join(
snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE =os.path.join(
snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
else:
SCREAMING_SNAKE_CASE =(
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
SCREAMING_SNAKE_CASE =(
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(snake_case ,'w' ,encoding='utf-8' ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
SCREAMING_SNAKE_CASE =token_index
writer.write(','.join(snake_case ) + '\n' )
index += 1
with open(snake_case ,'w' ,encoding='utf-8' ) as writer:
json.dump(self.emoji ,snake_case )
return vocab_file, emoji_file
class a_ ( A_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,snake_case : int ,snake_case : Optional[int] ,snake_case : int ):
SCREAMING_SNAKE_CASE =vocab # same as swe
SCREAMING_SNAKE_CASE =ids_to_tokens # same as bpe
SCREAMING_SNAKE_CASE =emoji
SCREAMING_SNAKE_CASE =np.max([len(snake_case ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self : List[Any] ):
return len(self.ids_to_tokens )
    def clean_text(self ,content ):
        content = self.content_repatter1.sub('<URL>' ,content )
        content = self.content_repatter2.sub('<EMAIL>' ,content )
        content = self.content_repatter3.sub('<TEL>' ,content )
        content = self.content_repatter4.sub('<DATE>' ,content )
        content = self.content_repatter5.sub('<DATE>' ,content )
        content = self.content_repatter6.sub('<PRICE>' ,content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
        return content
def _lowerCAmelCase ( self : List[str] ,snake_case : Optional[Any] ,snake_case : Optional[int]=False ):
SCREAMING_SNAKE_CASE =text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE =text.replace(' ' ,'<SP>' )
SCREAMING_SNAKE_CASE =text.replace('\r\n' ,'<BR>' )
SCREAMING_SNAKE_CASE =text.replace('\n' ,'<BR>' )
SCREAMING_SNAKE_CASE =text.replace('\r' ,'<BR>' )
SCREAMING_SNAKE_CASE =text.replace('\t' ,'<TAB>' )
SCREAMING_SNAKE_CASE =text.replace('—' ,'ー' )
SCREAMING_SNAKE_CASE =text.replace('−' ,'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
SCREAMING_SNAKE_CASE =text.replace(snake_case ,snake_case )
if clean:
SCREAMING_SNAKE_CASE =self.clean_text(snake_case )
def check_simbol(snake_case : str ):
SCREAMING_SNAKE_CASE =x.encode()
if len(snake_case ) == 1 and len(snake_case ) == 2:
SCREAMING_SNAKE_CASE =(int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC_2A1 and c <= 0XC_2BF)
or (c >= 0XC_780 and c <= 0XC_783)
or (c >= 0XC_AB9 and c <= 0XC_BBF)
or (c >= 0XC_C80 and c <= 0XC_DA2)
):
return True
return False
def checkuae(snake_case : Optional[Any] ):
SCREAMING_SNAKE_CASE =x.encode()
if len(snake_case ) == 1 and len(snake_case ) == 3:
SCREAMING_SNAKE_CASE =(int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE28_080 and c <= 0XE2B_07F:
return True
return False
SCREAMING_SNAKE_CASE =0
SCREAMING_SNAKE_CASE =[]
while pos < len(snake_case ):
SCREAMING_SNAKE_CASE =min(len(snake_case ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
SCREAMING_SNAKE_CASE =[] # (token_id, token, pos)
for e in range(snake_case ,snake_case ,-1 ):
SCREAMING_SNAKE_CASE =text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case ) > 2:
SCREAMING_SNAKE_CASE =[(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case ) > 0:
# the smallest token_id is adopted
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =sorted(snake_case ,key=lambda snake_case : x[0] )[0]
result.append(snake_case )
SCREAMING_SNAKE_CASE =e
else:
SCREAMING_SNAKE_CASE =pos + 1
SCREAMING_SNAKE_CASE =text[pos:end]
if check_simbol(snake_case ):
result.append('<KIGOU>' )
elif checkuae(snake_case ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
SCREAMING_SNAKE_CASE =end
return result
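    # Note on the scan above (describing the existing logic, nothing added):
    # every vocabulary entry that matches at the current position is collected
    # (special <...> tokens win immediately), the candidate with the smallest
    # token id is adopted, and unmatched characters fall back to <KIGOU>,
    # <U2000U2BFF>, or raw <|byteN|> tokens.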
def _lowerCAmelCase ( self : Optional[Any] ,snake_case : str ,snake_case : List[str]="\n" ):
SCREAMING_SNAKE_CASE =[]
SCREAMING_SNAKE_CASE =[]
SCREAMING_SNAKE_CASE =self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case ) > 0:
words.append(bytearray(snake_case ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE =[]
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case )
if len(snake_case ) > 0:
words.append(bytearray(snake_case ).decode('utf-8' ,errors='replace' ) )
SCREAMING_SNAKE_CASE =''.join(snake_case )
return text
| 334 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowerCAmelCase = parser.parse_args()
if args.check_lib:
lowerCAmelCase = importlib.import_module('''transformers''')
lowerCAmelCase = Path(transformers_module.__file__).parent
else:
lowerCAmelCase = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 295 | 0 |
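# A minimal sketch of the same packaged-file check, generic over any installed
# package; importlib resolves the location Python would actually import from.
# The package/file pair below is illustrative only.
import importlib
from pathlib import Path

def data_files_present(package: str, relative_paths: list) -> bool:
    root = Path(importlib.import_module(package).__file__).parent
    return all((root / rel).exists() for rel in relative_paths)

print(data_files_present('numpy', ['py.typed']))  # True for typed numpy builds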
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 196 |
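# A tiny sketch of the lazy-import pattern behind the import structure above,
# using PEP 562 module-level __getattr__ instead of transformers' private
# _LazyModule; the name->module mapping is illustrative. Intended to live in a
# package __init__.py, so heavy submodules load only on first attribute access.
import importlib

_LAZY = {'sqrt': 'math', 'dumps': 'json'}  # exported name -> providing module

def __getattr__(name):
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')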
from __future__ import annotations
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
if len(lowercase__ ) <= 1 or n <= 1:
return
insert_next(lowercase__ , n - 1 )
rec_insertion_sort(lowercase__ , n - 1 )
def _lowerCamelCase( lowercase__ , lowercase__ ) -> Any:
'''simple docstring'''
if index >= len(lowercase__ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
collection[index - 1], collection[index]= (
collection[index],
collection[index - 1],
)
insert_next(lowercase__ , index + 1 )
if __name__ == "__main__":
lowerCAmelCase = input('''Enter integers separated by spaces: ''')
lowerCAmelCase = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 295 | 0 |
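# Quick demonstration, assuming the two helpers above keep their source names
# rec_insertion_sort / insert_next: the sort mutates the list in place.
data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
print(data)  # [1, 2, 3, 4, 5]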
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
__snake_case = 6378137.0
__snake_case = 6356752.314245
__snake_case = 6378137
def a ( __a , __a , __a , __a ) -> float:
'''simple docstring'''
UpperCamelCase__ :List[str] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCamelCase__ :Any = atan((1 - flattening) * tan(radians(lowercase__ ) ) )
UpperCamelCase__ :Optional[Any] = atan((1 - flattening) * tan(radians(lowercase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCamelCase__ :Optional[Any] = haversine_distance(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCamelCase__ :str = (b_lat1 + b_lat2) / 2 # the two distinct parametric latitudes computed above
UpperCamelCase__ :Dict = (b_lat2 - b_lat1) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
UpperCamelCase__ :Tuple = (sin(lowercase__ ) ** 2) * (cos(lowercase__ ) ** 2)
UpperCamelCase__ :int = cos(sigma / 2 ) ** 2
UpperCamelCase__ :Optional[int] = (sigma - sin(lowercase__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
UpperCamelCase__ :Dict = (cos(lowercase__ ) ** 2) * (sin(lowercase__ ) ** 2)
UpperCamelCase__ :Union[str, Any] = sin(sigma / 2 ) ** 2
UpperCamelCase__ :str = (sigma + sin(lowercase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 97 |
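# Hedged usage sketch, assuming the distance function above keeps its source
# name lamberts_ellipsoidal_distance(lat1, lon1, lat2, lon2) and returns
# metres. Coordinates are approximate; San Francisco to New York should come
# out around 4.1e6 metres.
SAN_FRANCISCO = (37.77, -122.42)
NEW_YORK = (40.71, -74.01)
print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK))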
def _lowerCamelCase( lowercase__ , lowercase__ = " " ) -> list:
'''simple docstring'''
__lowercase= []
__lowercase= 0
for index, char in enumerate(lowercase__ ):
if char == separator:
split_words.append(string[last_index:index] )
__lowercase= index + 1
elif index + 1 == len(lowercase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 295 | 0 |
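# Usage sketch, assuming the splitter above keeps its source name split().
# It matches str.split for single-character separators, except that a
# trailing separator is dropped instead of yielding an empty string.
print(split('apple#banana#cherry', '#'))  # ['apple', 'banana', 'cherry']
print(split('Hello there'))               # ['Hello', 'there']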
def UpperCamelCase ( __lowercase : Dict ,__lowercase : int ):
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(y ,x % y )
def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : List[Any] ):
'''simple docstring'''
return (x * y) // greatest_common_divisor(x ,y )
def UpperCamelCase ( __lowercase : Optional[Any] = 20 ):
'''simple docstring'''
A_ : Optional[int] = 1
for i in range(1 ,n + 1 ):
A_ : str = lcm(g ,i )
return g
if __name__ == "__main__":
print(F"""{solution() = }""")
| 140 |
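# Sanity check (Project Euler 5), assuming the helpers above keep their source
# names greatest_common_divisor / lcm / solution: the smallest number evenly
# divisible by every integer in 1..10 is 2520, and in 1..20 is 232792560.
assert solution(10) == 2520
assert solution(20) == 232792560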
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def _lowerCamelCase( lowercase__=None , lowercase__=None ) -> Dict:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowercase__ )
@dataclass
class A :
UpperCamelCase_ : str =field(
metadata={'''help''': '''The csv file to plot.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
UpperCamelCase_ : bool =field(
default=A_ , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
UpperCamelCase_ : Optional[str] =field(
default=A_ , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
UpperCamelCase_ : Optional[List[str]] =list_field(
default=A_ , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
try:
int(lowercase__ )
return True
except ValueError:
return False
def _lowerCamelCase( lowercase__ ) -> bool:
'''simple docstring'''
try:
float(lowercase__ )
return True
except ValueError:
return False
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= args
__lowercase= defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
__lowercase= csv.DictReader(lowerCAmelCase )
for row in reader:
__lowercase= row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
__lowercase= int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
__lowercase= float(row['result'] )
def _A (self ):
__lowercase, __lowercase= plt.subplots()
__lowercase= 'Time usage' if self.args.is_time else 'Memory usage'
__lowercase= title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__lowercase= sorted(set(self.result_dict[model_name]['bsz'] ) )
__lowercase= sorted(set(self.result_dict[model_name]['seq_len'] ) )
__lowercase= self.result_dict[model_name]['result']
((__lowercase), (__lowercase))= (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__lowercase= (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowercase= np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCAmelCase , )
else:
__lowercase= np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__lowercase), (__lowercase))= (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
__lowercase= np.asarray(lowerCAmelCase , lowerCAmelCase )[: len(lowerCAmelCase )]
plt.scatter(
lowerCAmelCase , lowerCAmelCase , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(lowerCAmelCase , lowerCAmelCase , '--' )
title_str += f' {label_model_name} vs.'
__lowercase= title_str[:-4]
__lowercase= 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(lowerCAmelCase )
plt.xlabel(lowerCAmelCase )
plt.ylabel(lowerCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def _lowerCamelCase( ) -> str:
'''simple docstring'''
__lowercase= HfArgumentParser(lowercase__ )
__lowercase= parser.parse_args_into_dataclasses()[0]
__lowercase= Plot(args=lowercase__ )
plot.plot()
if __name__ == "__main__":
main()
| 295 | 0 |
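# A compact sketch of the core plotting idea in the script above: scatter plus
# a dashed line on log-log axes, with ScalarFormatter so ticks read as plain
# numbers (512) rather than scientific notation. The data points are made up.
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

seq_lens = np.array([32, 128, 512, 2048])
mem_mb = np.array([210, 480, 1500, 5400])

fig, ax = plt.subplots()
ax.set_xscale('log')
ax.set_yscale('log')
for axis in (ax.xaxis, ax.yaxis):
    axis.set_major_formatter(ScalarFormatter())
plt.scatter(seq_lens, mem_mb, label='model - bsz: 8')
plt.plot(seq_lens, mem_mb, '--')
plt.xlabel('in #tokens')
plt.ylabel('Memory in MB')
plt.legend()
plt.show()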
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase_ ( A_ ):
lowerCAmelCase__ = 42
class lowerCamelCase_ ( A_ , A_ ):
lowerCAmelCase__ = True
@register_to_config
def __init__( self : Dict , _A : Union[str, Any] = 3 , _A : Dict = 3 , _A : List[str] = ("DownEncoderBlock2D",) , _A : int = ("UpDecoderBlock2D",) , _A : Union[str, Any] = (64,) , _A : List[str] = 1 , _A : int = "silu" , _A : Tuple = 4 , _A : Tuple = 32 , _A : Tuple = 32 , _A : str = 0.1_8_2_1_5 , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
UpperCAmelCase__ : Union[str, Any] = Encoder(
in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , )
# pass init params to Decoder
UpperCAmelCase__ : List[Any] = Decoder(
in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , norm_num_groups=_A , act_fn=_A , )
UpperCAmelCase__ : Dict = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase__ : Union[str, Any] = nn.Convad(_A , _A , 1 )
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[Any] = False
# only relevant if vae tiling is enabled
UpperCAmelCase__ : List[Any] = self.config.sample_size
UpperCAmelCase__ : List[Any] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase__ : Optional[int] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase__ : Dict = 0.2_5
def lowercase_ ( self : Union[str, Any] , _A : str , _A : int=False ):
'''simple docstring'''
if isinstance(_A , (Encoder, Decoder) ):
UpperCAmelCase__ : Tuple = value
def lowercase_ ( self : Any , _A : Tuple = True ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = use_tiling
def lowercase_ ( self : List[str] ):
'''simple docstring'''
self.enable_tiling(_A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = True
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = {}
def fn_recursive_add_processors(_A : int , _A : Any , _A : str ):
if hasattr(_A , '''set_processor''' ):
UpperCAmelCase__ : Optional[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , _A , _A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A , _A , _A )
return processors
def lowercase_ ( self : Dict , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = len(self.attn_processors.keys() )
if isinstance(_A , _A ) and len(_A ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A : Tuple , _A : Any , _A : Any ):
if hasattr(_A , '''set_processor''' ):
if not isinstance(_A , _A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , _A , _A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A , _A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowercase_ ( self : Dict , _A : Union[str, Any] , _A : Tuple = True ):
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_A , return_dict=_A )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase__ : Tuple = [self.encoder(x_slice ) for x_slice in x.split(1 )]
UpperCAmelCase__ : Union[str, Any] = torch.cat(_A )
else:
UpperCAmelCase__ : Dict = self.encoder(_A )
UpperCAmelCase__ : str = self.quant_conv(_A )
UpperCAmelCase__ : Optional[Any] = DiagonalGaussianDistribution(_A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_A )
def lowercase_ ( self : Optional[int] , _A : List[str] , _A : int = True ):
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_A , return_dict=_A )
UpperCAmelCase__ : str = self.post_quant_conv(_A )
UpperCAmelCase__ : Optional[int] = self.decoder(_A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
@apply_forward_hook
def lowercase_ ( self : int , _A : Any , _A : List[Any] = True ):
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase__ : str = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
UpperCAmelCase__ : Dict = torch.cat(_A )
else:
UpperCAmelCase__ : Tuple = self._decode(_A ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_A )
def lowercase_ ( self : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = min(a.shape[2] , b.shape[2] , _A )
for y in range(_A ):
UpperCAmelCase__ : int = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowercase_ ( self : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = min(a.shape[3] , b.shape[3] , _A )
for x in range(_A ):
UpperCAmelCase__ : Any = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowercase_ ( self : Any , _A : Optional[Any] , _A : str = True ):
'''simple docstring'''
UpperCAmelCase__ : Dict = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase__ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase__ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase__ : List[str] = []
for i in range(0 , x.shape[2] , _A ):
UpperCAmelCase__ : str = []
for j in range(0 , x.shape[3] , _A ):
UpperCAmelCase__ : Dict = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase__ : Any = self.encoder(_A )
UpperCAmelCase__ : Dict = self.quant_conv(_A )
row.append(_A )
rows.append(_A )
UpperCAmelCase__ : Union[str, Any] = []
for i, row in enumerate(_A ):
UpperCAmelCase__ : Union[str, Any] = []
for j, tile in enumerate(_A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase__ : Tuple = self.blend_v(rows[i - 1][j] , _A , _A )
if j > 0:
UpperCAmelCase__ : List[str] = self.blend_h(row[j - 1] , _A , _A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_A , dim=3 ) )
UpperCAmelCase__ : Optional[int] = torch.cat(_A , dim=2 )
UpperCAmelCase__ : List[Any] = DiagonalGaussianDistribution(_A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_A )
def lowercase_ ( self : Optional[int] , _A : List[Any] , _A : Any = True ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase__ : Union[str, Any] = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase__ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase__ : Optional[Any] = []
for i in range(0 , z.shape[2] , _A ):
UpperCAmelCase__ : List[Any] = []
for j in range(0 , z.shape[3] , _A ):
UpperCAmelCase__ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase__ : Tuple = self.post_quant_conv(_A )
UpperCAmelCase__ : Dict = self.decoder(_A )
row.append(_A )
rows.append(_A )
UpperCAmelCase__ : Any = []
for i, row in enumerate(_A ):
UpperCAmelCase__ : str = []
for j, tile in enumerate(_A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase__ : List[Any] = self.blend_v(rows[i - 1][j] , _A , _A )
if j > 0:
UpperCAmelCase__ : str = self.blend_h(row[j - 1] , _A , _A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_A , dim=3 ) )
UpperCAmelCase__ : Optional[Any] = torch.cat(_A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
def lowercase_ ( self : List[str] , _A : Optional[int] , _A : Union[str, Any] = False , _A : Union[str, Any] = True , _A : int = None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = sample
UpperCAmelCase__ : str = self.encode(_A ).latent_dist
if sample_posterior:
UpperCAmelCase__ : List[str] = posterior.sample(generator=_A )
else:
UpperCAmelCase__ : Any = posterior.mode()
UpperCAmelCase__ : Dict = self.decode(_A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
| 181 |
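# A standalone sketch of the seam blending used by the tiled VAE above:
# linearly cross-fade the overlapping rows of two vertically adjacent tiles
# (the same idea applies per-column for horizontal seams).
import torch

def blend_vertical(top: torch.Tensor, bottom: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(top.shape[2], bottom.shape[2], blend_extent)
    for y in range(blend_extent):
        w = y / blend_extent  # weight 0 at the seam, approaching 1 inside `bottom`
        bottom[:, :, y, :] = top[:, :, -blend_extent + y, :] * (1 - w) + bottom[:, :, y, :] * w
    return bottom

tile_a = torch.zeros(1, 1, 8, 8)
tile_b = torch.ones(1, 1, 8, 8)
blended = blend_vertical(tile_a, tile_b, blend_extent=4)
print(blended[0, 0, :4, 0])  # tensor([0.0000, 0.2500, 0.5000, 0.7500])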
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_1_2,
}
lowerCAmelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_1_2,
'''facebook/dpr-question_encoder-multiset-base''': 5_1_2,
}
lowerCAmelCase = {
'''facebook/dpr-reader-single-nq-base''': 5_1_2,
'''facebook/dpr-reader-multiset-base''': 5_1_2,
}
lowerCAmelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCAmelCase = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class A ( A_ ):
UpperCamelCase_ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase_ : Dict =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[int] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : int =DPRContextEncoderTokenizer
class A ( A_ ):
UpperCamelCase_ : Any =VOCAB_FILES_NAMES
UpperCamelCase_ : List[str] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : List[Any] =DPRQuestionEncoderTokenizer
lowerCAmelCase = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCAmelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCAmelCase = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(A_ )
class A :
def __call__(self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , )
elif titles is None or texts is None:
__lowercase= titles if texts is None else texts
return super().__call__(
lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , )
__lowercase= titles if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [titles]
__lowercase= texts if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [texts]
__lowercase= len(lowerCAmelCase )
__lowercase= questions if not isinstance(lowerCAmelCase , lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
lowerCAmelCase ), f'There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'
__lowercase= super().__call__(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids']
__lowercase= super().__call__(lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase )['input_ids']
__lowercase= {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase , lowerCAmelCase )
]
}
if return_attention_mask is not False:
__lowercase= []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowercase= attention_mask
return self.pad(lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1_6 , lowerCAmelCase = 6_4 , lowerCAmelCase = 4 , ):
__lowercase= reader_input['input_ids']
__lowercase, __lowercase, __lowercase= reader_output[:3]
__lowercase= len(lowerCAmelCase )
__lowercase= sorted(range(lowerCAmelCase ) , reverse=lowerCAmelCase , key=relevance_logits.__getitem__ )
__lowercase= []
for doc_id in sorted_docs:
__lowercase= list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__lowercase= sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__lowercase= sequence_ids.index(self.pad_token_id )
else:
__lowercase= len(lowerCAmelCase )
__lowercase= self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase , top_spans=lowerCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase , start_index=lowerCAmelCase , end_index=lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= []
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowercase= sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] , reverse=lowerCAmelCase )
__lowercase= []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
__lowercase= end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A_ )
class A ( A_ , A_ ):
UpperCamelCase_ : Optional[int] =VOCAB_FILES_NAMES
UpperCamelCase_ : List[str] =READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Dict =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Optional[Any] =READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ : Union[str, Any] =['''input_ids''', '''attention_mask''']
UpperCamelCase_ : Dict =DPRReaderTokenizer
| 295 | 0 |
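# A self-contained sketch of the span selection implemented by the reader
# tokenizer above: score every (start, end) pair within max_answer_length by
# start_logit + end_logit, sort descending, and greedily keep spans that do
# not overlap an already-selected span.
def best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scores.append(((s, s + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (s, e), _ in scores:
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
            continue  # overlaps a higher-scoring span already kept
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 3.0]))  # [(1, 2), (0, 0)]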
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_A : List[Any] = logging.get_logger(__name__)
class _lowercase ( A_ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[str] = ['''pixel_values''']
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : str = True , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : str = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : int = True , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = True , SCREAMING_SNAKE_CASE__ : List[str] = 1 / 2_55 , SCREAMING_SNAKE_CASE__ : Optional[int] = True , SCREAMING_SNAKE_CASE__ : Tuple = None , SCREAMING_SNAKE_CASE__ : int = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> str:
super().__init__(**SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = size if size is not None else {"""shortest_edge""": 2_56}
__lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
__lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name="""crop_size""" )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Tuple = None , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> Tuple:
__lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__lowerCAmelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size["""shortest_edge"""] , default_to_square=SCREAMING_SNAKE_CASE__ )
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple = None , **SCREAMING_SNAKE_CASE__ : int , ) -> int:
__lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE__ , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple = None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Tuple:
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : List[str] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : Dict = None , SCREAMING_SNAKE_CASE__ : List[Any] = None , SCREAMING_SNAKE_CASE__ : Union[str, Any] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Any , ) -> str:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name="""crop_size""" )
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
__lowerCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any = None ) -> Any:
__lowerCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase = target_sizes.numpy()
__lowerCAmelCase = []
for idx in range(len(SCREAMING_SNAKE_CASE__ ) ):
__lowerCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE__ )
else:
__lowerCAmelCase = logits.argmax(dim=1 )
__lowerCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 229 |
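# Hedged usage sketch of the preprocessing pipeline above (resize the short
# side to 256, centre-crop to 224x224, rescale to [0, 1], then normalise);
# `_lowercase` is simply the masked class name from the fragment.
import numpy as np

processor = _lowercase()
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
batch = processor(images=image, return_tensors='pt')  # __call__ dispatches to preprocess
print(batch['pixel_values'].shape)  # expected: torch.Size([1, 3, 224, 224])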
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A ( nn.Module ):
def __init__(self ):
super().__init__()
__lowercase= nn.Linear(3 , 4 )
__lowercase= nn.BatchNormad(4 )
__lowercase= nn.Linear(4 , 5 )
def _A (self , lowerCAmelCase ):
return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) )
class A ( A_ ):
def _A (self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
return (args[0] + 1,) + args[1:], kwargs
class A ( A_ ):
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return output + 1
class A ( unittest.TestCase ):
def _A (self ):
__lowercase= ModelForTest()
__lowercase= ModelHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(test_model._hf_hook , lowerCAmelCase )
self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowerCAmelCase )
self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) )
self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) )
def _A (self ):
__lowercase= ModelForTest()
__lowercase= ModelHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase )
self.assertEqual(isinstance(test_model._hf_hook , SequentialHook ) , True )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(lowerCAmelCase , '_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] )
remove_hook_from_module(lowerCAmelCase )
self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook' ) )
self.assertFalse(hasattr(lowerCAmelCase , '_old_forward' ) )
def _A (self ):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(x + 1 )
__lowercase= test_model(x + 2 )
__lowercase= PreForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__lowercase= PreForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__lowercase= SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5 )
def _A (self ):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(lowerCAmelCase )
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__lowercase= SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5 )
def _A (self ):
__lowercase= ModelForTest()
__lowercase= torch.randn(2 , 3 )
__lowercase= test_model(lowerCAmelCase )
__lowercase= PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase )
__lowercase= test_model(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__lowercase= True
__lowercase= test_model(lowerCAmelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _A (self ):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase ) )
__lowercase= torch.randn(2 , 3 ).to(0 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , torch.device(0 ) )
def _A (self ):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
__lowercase= {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def _A (self ):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(lowerCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
def _A (self ):
__lowercase= ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# This will move each submodule on different devices
__lowercase= 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
__lowercase= torch.device(lowerCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device , torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) )
__lowercase= torch.randn(2 , 3 )
__lowercase= model(lowerCAmelCase )
self.assertEqual(output.device , lowerCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
| 295 | 0 |
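# A minimal sketch of the mechanism those tests exercise: replace a module's
# forward with a wrapper so a hook can rewrite the inputs before the original
# forward runs (the same trick accelerate uses via _old_forward).
import torch
import torch.nn as nn

class AddOneToInput:
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

def add_hook(module: nn.Module, hook) -> None:
    old_forward = module.forward
    def new_forward(*args, **kwargs):
        args, kwargs = hook.pre_forward(module, *args, **kwargs)
        return old_forward(*args, **kwargs)
    module.forward = new_forward  # instance attribute shadows the class method

layer = nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = layer(x + 1)
add_hook(layer, AddOneToInput())
assert torch.allclose(layer(x), expected)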
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10**-10 ) -> float:
'''simple docstring'''
SCREAMING_SNAKE_CASE = a
while True:
SCREAMING_SNAKE_CASE = Decimal(_SCREAMING_SNAKE_CASE ) - (
Decimal(eval(_SCREAMING_SNAKE_CASE ) ) / Decimal(eval(str(diff(_SCREAMING_SNAKE_CASE ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_SCREAMING_SNAKE_CASE ) ) < precision: # noqa: S307
return float(_SCREAMING_SNAKE_CASE )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find root of log(x) - 1 = 0 (the root is x = e, not the square root of 5)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 296 |
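# Sanity check, assuming the solver above behaves as its source newton_raphson
# and that math's log is in scope for eval: log(x) - 1 = 0 has the closed-form
# root x = e.
from math import e, isclose
assert isclose(newton_raphson('log(x) - 1', 2), e, rel_tol=1e-6)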
def solution(n: int = 10) -> str:
    """Returns the last `n` digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("""Invalid input""")
    modulus = 10**n
    number = 28_433 * (pow(2, 7_830_457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
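# Illustrative cross-check of the modular-exponentiation shortcut: pow(2, e, m)
# keeps every intermediate value below m, yet agrees with reducing the full
# ~2.36-million-digit power after the fact (the slow path below is fine as a one-off).
assert int(solution(10)) == (28_433 * 2**7_830_457 + 1) % 10**10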
| 296 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNet2DConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,num_train_timesteps=1000 ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowerCamelCase__ ,"""half""" ):
SCREAMING_SNAKE_CASE = module.half()
SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = pipe(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
'''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 296 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_llama"""] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
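# Illustrative note on the _LazyModule pattern above: importing the package only
# registers the names in _import_structure; the heavy submodule import runs on
# first attribute access, e.g. `from transformers.models.llama import LlamaConfig`
# (assuming this file is transformers/models/llama/__init__.py).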
| 296 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name) -> FocalNetConfig:
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
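# Illustrative call (any key of the model_name_to_url map below works the same):
# get_focalnet_config("focalnet-tiny-lrf") yields embed_dim=96, depths=[2, 2, 6, 2],
# focal_levels=[3, 3, 3, 3] and focal_windows=[3, 3, 3, 3].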
def rename_key(name: str) -> str:
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
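# Worked example of the renaming chain above, on a hypothetical checkpoint key:
# "layers.0.blocks.1.modulation.f.weight"
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"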
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False) -> None:
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 296 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first `n` lines of every file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 296 | 1 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
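# The loaders above wrap a synthetic linear-regression task, y = a * x + b + noise,
# so the checkpointing tests can verify that the trained DummyModel.a and
# DummyModel.b survive a save/load round trip.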
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(total_limit=1 ,project_dir=lowerCamelCase__ ,automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# Save initial
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""initial""" )
accelerator.save_state(lowerCamelCase__ )
((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
accelerator.load_state(lowerCamelCase__ )
((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = train(2 ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# Save everything
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""checkpoint""" )
accelerator.save_state(lowerCamelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCamelCase__ )
test_rands += train(1 ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCamelCase__ ,project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCamelCase__ ,project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
accelerator.load_state(os.path.join(lowerCamelCase__ ,"""checkpoints""" ,"""checkpoint_0""" ) )
((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = train(2 ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ ,"""checkpoints""" ,"""checkpoint_1""" ) )
test_rands += train(1 ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
((SCREAMING_SNAKE_CASE), (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE = Accelerator()
with self.assertRaises(lowerCamelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ ,step_size=1 ,gamma=0.99 )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCamelCase__ ,project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE = scheduler.state_dict()
train(3 ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ ,"""checkpoints""" ,"""checkpoint_0""" ) )
self.assertEqual(lowerCamelCase__ ,scheduler.state_dict() )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ ,total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCamelCase__ ,project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(lowerCamelCase__ )
# Save 11 states; with total_limit=2 only the two most recent checkpoints are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ ,"""checkpoints""" ,"""checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ ,"""checkpoints""" ,"""checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ ,"""checkpoints""" ,"""checkpoint_10""" ) ) )
@require_cuda
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__ ,env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = """/tmp/accelerate/state_checkpointing"""
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
SCREAMING_SNAKE_CASE_ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
param_device = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
param_device = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 296 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" )
os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,lowerCamelCase__ )
# Wrong model type will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""model""" ,lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""bert""" ,lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(ValueError ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(ValueError ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=False )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=True )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(tmp_dir ,trust_remote_code=True )
self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = "new-model"
try:
AutoConfig.register("""new-model""" ,lowerCamelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 296 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2ForCTC,
Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
SCREAMING_SNAKE_CASE_ = True
from torch.cuda.amp import autocast
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
def __lowercase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
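# `list_field` exists because dataclasses reject mutable defaults; as a sketch,
# `chars_to_ignore: List[str] = list_field(default=[",", "?"])` gives every parsed
# instance its own fresh copy of the list via `default_factory`.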
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"], metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that will dynamically pad the inputs received."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt",
        )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
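    # Worked example of the masking above (illustrative values): with
    # attention_mask [1, 1, 0], a padded label row [12, 7, 0] becomes [12, 7, -100];
    # -100 is the index the loss ignores, and compute_metrics later maps it back to
    # the pad token before decoding.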
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def __lowercase ( ) -> Dict:
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , _SCREAMING_SNAKE_CASE )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names,
    )
    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
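    # Illustrative result (exact ids depend on the corpus): something like
    # {"a": 0, ..., "|": 5, "[UNK]": 30, "[PAD]": 31}, where "|" stands in for the
    # space character as the tokenizer's word delimiter.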
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch
    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers,
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
# Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor,
    )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE = trainer.evaluate()
SCREAMING_SNAKE_CASE = data_args.max_val_samples if data_args.max_val_samples is not None else len(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("""eval""" , _SCREAMING_SNAKE_CASE )
trainer.save_metrics("""eval""" , _SCREAMING_SNAKE_CASE )
return results
if __name__ == "__main__":
main()
| 296 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git_vision_model"
def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git"
def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = num_image_with_embedding
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
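# Usage sketch (illustrative, not in the original file): in the upstream
# `transformers` source the first class above is `GitVisionConfig`, so a
# standalone vision config can be built and round-tripped like this.
demo_vision_cfg = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
assert demo_vision_cfg.to_dict()["hidden_size"] == 768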
| 296 | 1 |
import pprint
import requests
SCREAMING_SNAKE_CASE_ = """https://zenquotes.io/api"""
def __lowercase ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __lowercase ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = random_quotes()
pprint.pprint(response)
| 296 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ = random.Random()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
if rng is None:
SCREAMING_SNAKE_CASE = global_rng
SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
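# Shape sketch (illustrative, added): the helper above (called `floats_list`
# at its use sites below) fills a (rows, cols) nested list with values drawn
# from the global rng, scaled into [0, scale).
demo_values = floats_list((2, 3))
assert len(demo_values) == 2 and all(len(row) == 3 for row in demo_values)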
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = min_seq_length
SCREAMING_SNAKE_CASE = max_seq_length
SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE = spectrogram_length
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = num_audio_channels
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = chunk_length
SCREAMING_SNAKE_CASE = sampling_rate
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str:
'''simple docstring'''
def _flatten(lowerCamelCase__ : List[Any] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : List[Any] = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test non-batched input
SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE = feature_extractor(
lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test that 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape ,(1, 1, 192, 128) )
SCREAMING_SNAKE_CASE = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase__ ,atol=1e-4 ) )
| 296 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
SCREAMING_SNAKE_CASE_ = """src/diffusers"""
SCREAMING_SNAKE_CASE_ = """."""
# This is to make sure the diffusers module imported is the one in the repo.
SCREAMING_SNAKE_CASE_ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
SCREAMING_SNAKE_CASE_ = spec.loader.load_module()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
return line.startswith(_SCREAMING_SNAKE_CASE ) or len(_SCREAMING_SNAKE_CASE ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , _SCREAMING_SNAKE_CASE ) is not None
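# Behavior sketch (illustrative, added): the predicate above (used below as
# `_should_continue`) keeps consuming lines while they still belong to the
# copied block: indented lines, blank lines, or a dangling `)` that closes
# a signature.
assert _should_continue("    x = 1\n", "    ")        # still inside the body
assert _should_continue("\n", "    ")                 # blank line
assert not _should_continue("def other():\n", "    ")  # dedent ends the block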
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = object_name.split(""".""" )
SCREAMING_SNAKE_CASE = 0
# First let's find the module where our object lives.
SCREAMING_SNAKE_CASE = parts[i]
while i < len(_SCREAMING_SNAKE_CASE ) and not os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , F"""{module}.py""" ) ):
i += 1
if i < len(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , parts[i] )
if i >= len(_SCREAMING_SNAKE_CASE ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(_SCREAMING_SNAKE_CASE , F"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE = f.readlines()
# Now let's find the class / func in the code!
SCREAMING_SNAKE_CASE = """"""
SCREAMING_SNAKE_CASE = 0
for name in parts[i + 1 :]:
while (
line_index < len(_SCREAMING_SNAKE_CASE ) and re.search(rF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_SCREAMING_SNAKE_CASE ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
SCREAMING_SNAKE_CASE = line_index
while line_index < len(_SCREAMING_SNAKE_CASE ) and _should_continue(lines[line_index] , _SCREAMING_SNAKE_CASE ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE = lines[start_index:line_index]
return "".join(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
SCREAMING_SNAKE_CASE_ = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
SCREAMING_SNAKE_CASE_ = re.compile(r"""<FILL\s+[^>]*>""")
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = code.split("""\n""" )
SCREAMING_SNAKE_CASE = 0
while idx < len(_SCREAMING_SNAKE_CASE ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_SCREAMING_SNAKE_CASE ):
return re.search(r"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(get_indent(_SCREAMING_SNAKE_CASE ) ) > 0
if has_indent:
SCREAMING_SNAKE_CASE = F"""class Bla:\n{code}"""
SCREAMING_SNAKE_CASE = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = black.format_str(_SCREAMING_SNAKE_CASE , mode=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = style_docstrings_in_code(_SCREAMING_SNAKE_CASE )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE = f.readlines()
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = search.groups()
SCREAMING_SNAKE_CASE = find_code_in_diffusers(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = get_indent(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = line_index + 1 if indent == theoretical_indent else line_index + 2
SCREAMING_SNAKE_CASE = theoretical_indent
SCREAMING_SNAKE_CASE = start_index
# Loop to check the observed code; stop when the indentation diminishes or when we see an "# End copy" comment.
SCREAMING_SNAKE_CASE = True
while line_index < len(_SCREAMING_SNAKE_CASE ) and should_continue:
line_index += 1
if line_index >= len(_SCREAMING_SNAKE_CASE ):
break
SCREAMING_SNAKE_CASE = lines[line_index]
SCREAMING_SNAKE_CASE = _should_continue(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and re.search(F"""^{indent}# End copy""" , _SCREAMING_SNAKE_CASE ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE = lines[start_index:line_index]
SCREAMING_SNAKE_CASE = """""".join(_SCREAMING_SNAKE_CASE )
# Remove any nested `Copied from` comments to avoid circular copies
SCREAMING_SNAKE_CASE = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(_SCREAMING_SNAKE_CASE ) is None]
SCREAMING_SNAKE_CASE = """\n""".join(_SCREAMING_SNAKE_CASE )
# Before comparing, use the `replace_pattern` on the original code.
if len(_SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE = replace_pattern.replace("""with""" , """""" ).split(""",""" )
SCREAMING_SNAKE_CASE = [_re_replace_pattern.search(_SCREAMING_SNAKE_CASE ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = pattern.groups()
SCREAMING_SNAKE_CASE = re.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if option.strip() == "all-casing":
SCREAMING_SNAKE_CASE = re.sub(obja.lower() , obja.lower() , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = re.sub(obja.upper() , obja.upper() , _SCREAMING_SNAKE_CASE )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
SCREAMING_SNAKE_CASE = blackify(lines[start_index - 1] + theoretical_code )
SCREAMING_SNAKE_CASE = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
SCREAMING_SNAKE_CASE = lines[:start_index] + [theoretical_code] + lines[line_index:]
SCREAMING_SNAKE_CASE = start_index + 1
if overwrite and len(_SCREAMING_SNAKE_CASE ) > 0:
# Warn the user a file has been modified.
print(F"""Detected changes, rewriting {filename}.""" )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_SCREAMING_SNAKE_CASE )
return diffs
def __lowercase ( _SCREAMING_SNAKE_CASE = False ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = glob.glob(os.path.join(_SCREAMING_SNAKE_CASE , """**/*.py""" ) , recursive=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = []
for filename in all_files:
SCREAMING_SNAKE_CASE = is_copy_consistent(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(_SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE = """\n""".join(_SCREAMING_SNAKE_CASE )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
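# Usage sketch (added; mirrors the script's own error message above):
#   python utils/check_copies.py                      # report stale copies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite them in place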
| 296 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
SCREAMING_SNAKE_CASE = [1_44, 1_92, 2_40]
SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
SCREAMING_SNAKE_CASE = [96, 1_20, 1_44]
SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
SCREAMING_SNAKE_CASE = [64, 80, 96]
SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 3_20]
SCREAMING_SNAKE_CASE = 0.05
SCREAMING_SNAKE_CASE = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
SCREAMING_SNAKE_CASE = 5_12
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = 21
SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json"""
else:
SCREAMING_SNAKE_CASE = 10_00
SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE = """huggingface/label-files"""
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
SCREAMING_SNAKE_CASE = """mobilevit.""" + name
return name
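# Tracing sketch (illustrative, added): pushing a stem key from the original
# checkpoint through the replacements above, one rewrite at a time.
demo_steps = [
    "conv_1.block.conv.weight",        # original checkpoint key
    "conv_stem.block.conv.weight",     # conv_1. -> conv_stem.
    "conv_stem.conv.weight",           # .block. -> .
    "conv_stem.convolution.weight",    # .conv. -> .convolution.
    "mobilevit.conv_stem.convolution.weight",  # prefix for non-base models
]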
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Dict:
'''simple docstring'''
if base_model:
SCREAMING_SNAKE_CASE = """"""
else:
SCREAMING_SNAKE_CASE = """mobilevit."""
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if key[:8] == "encoder.":
SCREAMING_SNAKE_CASE = key[8:]
if "qkv" in key:
SCREAMING_SNAKE_CASE = key.split(""".""" )
SCREAMING_SNAKE_CASE = int(key_split[0][6:] ) - 1
SCREAMING_SNAKE_CASE = int(key_split[3] )
SCREAMING_SNAKE_CASE = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
SCREAMING_SNAKE_CASE = layer.transformer.layer[transformer_num].attention.attention.all_head_size
SCREAMING_SNAKE_CASE = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[:dim]
SCREAMING_SNAKE_CASE = val[dim : dim * 2]
SCREAMING_SNAKE_CASE = val[-dim:]
else:
SCREAMING_SNAKE_CASE = val
return orig_state_dict
def __lowercase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_mobilevit_config(_SCREAMING_SNAKE_CASE )
# load original state_dict
SCREAMING_SNAKE_CASE = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
SCREAMING_SNAKE_CASE = MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE ).eval()
else:
SCREAMING_SNAKE_CASE = MobileViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
SCREAMING_SNAKE_CASE = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
SCREAMING_SNAKE_CASE = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
SCREAMING_SNAKE_CASE = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
SCREAMING_SNAKE_CASE = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
SCREAMING_SNAKE_CASE = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
SCREAMING_SNAKE_CASE = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
SCREAMING_SNAKE_CASE = model_mapping[mobilevit_name]
image_processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="""apple""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_mobilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 296 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE = """"""
else:
SCREAMING_SNAKE_CASE = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
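# Shape sketch (illustrative, added): timm stores Q, K and V stacked into a
# single (3 * hidden, hidden) matrix; the slicing above peels them apart.
# Toy hidden size of 4:
import torch
demo_qkv = torch.arange(3 * 4 * 4).reshape(3 * 4, 4)
demo_q, demo_k, demo_v = demo_qkv[:4, :], demo_qkv[4:8, :], demo_qkv[-4:, :]
assert demo_q.shape == demo_k.shape == demo_v.shape == (4, 4)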
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dct.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = val
def __lowercase ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DeiTConfig()
# all deit models have fine-tuned heads
SCREAMING_SNAKE_CASE = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE = 10_00
SCREAMING_SNAKE_CASE = """huggingface/label-files"""
SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = int(deit_name[-6:-4] )
SCREAMING_SNAKE_CASE = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
SCREAMING_SNAKE_CASE = 1_92
SCREAMING_SNAKE_CASE = 7_68
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 3
elif deit_name[9:].startswith("""small""" ):
SCREAMING_SNAKE_CASE = 3_84
SCREAMING_SNAKE_CASE = 15_36
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
SCREAMING_SNAKE_CASE = 10_24
SCREAMING_SNAKE_CASE = 40_96
SCREAMING_SNAKE_CASE = 24
SCREAMING_SNAKE_CASE = 16
# load original model from timm
SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE = timm_model.state_dict()
SCREAMING_SNAKE_CASE = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
SCREAMING_SNAKE_CASE = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
SCREAMING_SNAKE_CASE = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
SCREAMING_SNAKE_CASE = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE , crop_size=config.image_size )
SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = encoding["""pixel_values"""]
SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 296 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Image Classification mapping
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[str] = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
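# Usage sketch (illustrative, not in the original file; the checkpoint name
# is only an example): the auto classes above dispatch on the config's
# model_type, so "bert" resolves to FlaxBertModel.
# model = FlaxAutoModel.from_pretrained("bert-base-cased")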
| 296 | 1 |
from __future__ import annotations
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE = str(_SCREAMING_SNAKE_CASE )
return len(_SCREAMING_SNAKE_CASE ) == 9 and set(_SCREAMING_SNAKE_CASE ) == set("""123456789""" )
def __lowercase ( ) -> int | None:
'''simple docstring'''
for base_num in range(99_99 , 49_99 , -1 ):
SCREAMING_SNAKE_CASE = 10_00_02 * base_num
if is_9_pandigital(_SCREAMING_SNAKE_CASE ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
SCREAMING_SNAKE_CASE = 1_00_20_03 * base_num
if is_9_pandigital(_SCREAMING_SNAKE_CASE ):
return candidate
return None
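# Why 100002 and 1002003 (illustrative note, added): for a 4-digit n, the
# concatenated product with (1, 2) is int(str(n) + str(2 * n)); since 2 * n
# has 5 digits, that equals n * 10**5 + 2 * n = 100002 * n. A 3-digit n with
# (1, 2, 3) likewise gives 1002003 * n.
demo_n = 9327
assert int(str(demo_n) + str(2 * demo_n)) == 100_002 * demo_n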
if __name__ == "__main__":
print(F'''{solution() = }''')
| 296 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )
    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
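# Minimal usage sketch (my addition, not part of the original module):
#   config = TableTransformerConfig()
#   config.hidden_size            # -> 256, aliased to d_model via attribute_map
#   config.num_attention_heads    # -> 8, aliased to encoder_attention_heads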
| 296 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    '''simple docstring'''
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1", metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        }, )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        '''simple docstring'''
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self) -> bool:
        '''simple docstring'''
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx(self) -> int:
        '''simple docstring'''
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device(self) -> "torch.device":
        '''simple docstring'''
        requires_backends(self, ["torch"])
        return self._setup_devices[0]
    @property
    def n_gpu(self) -> int:
        '''simple docstring'''
        requires_backends(self, ["torch"])
        return self._setup_devices[1]
    @property
    def is_gpu(self) -> bool:
        '''simple docstring'''
        return self.n_gpu > 0
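# Behaviour sketch (my addition): a legacy flag such as `no_cuda=True` is popped
# in `__init__` and stored under its positive counterpart (`cuda=False`) before
# the remaining kwargs reach the parent dataclass.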
| 296 |
from collections import defaultdict
from math import gcd
def solution(limit=1_50_00_00) -> int:
    '''simple docstring'''
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
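# Background for the loop above (my addition): Euclid's formula builds every
# primitive Pythagorean triple from coprime m > n > 0 with m - n odd:
#   a = m*m - n*n, b = 2*m*n, c = m*m + n*n, so the perimeter is
#   a + b + c = 2*m*(m + n). Non-primitive triples are covered by stepping
# through every multiple of the primitive perimeter up to the limit.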
| 296 | 1 |
import math
import sys
def read_file_binary(file_path) -> str:
    '''simple docstring'''
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits) -> str:
    '''simple docstring'''
    lexicon = {"0": "0", "1": "1"}
    curr_string, result = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path, to_write) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits) -> str:
    '''simple docstring'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path, destination_path) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
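# Usage sketch (my addition; file names are illustrative): the script expects a
# file produced by the matching LZW compressor and writes the decompressed bytes:
#   python lempel_ziv_decompress.py compressed.lz restored.txt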
| 296 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 296 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    '''simple docstring'''
    config = SwinConfig(image_size=1_92)
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    '''simple docstring'''
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
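# A tiny illustration (my addition) of the fused-QKV split above: rows [0:dim)
# of the fused matrix are the query projection, rows [dim:2*dim) the key
# projection, and the last dim rows the value projection.
#   fused = torch.arange(6 * 4).reshape(6, 4)  # pretend all_head_size == 2
#   q, k, v = fused[:2, :], fused[2:4, :], fused[-2:, :]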
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 1_92, "width": 1_92})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
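# Example invocation (my addition; script and file names are illustrative):
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim-window6-192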
| 296 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    '''simple docstring'''
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 2_18_41
    else:
        num_classes = 10_00
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    '''simple docstring'''
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 296 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    '''simple docstring'''
    models: List[str] = list_field(
        default=[], metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        }, )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"})
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
    inference: bool = field(
        default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
    cuda: bool = field(
        default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."})
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
    memory: bool = field(
        default=True, metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        }, )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True, metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        }, )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv."}, )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv."}, )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv", metadata={"help": "CSV filename used if saving environment information."}, )
    log_filename: str = field(
        default=f"log_{round(time())}.csv", metadata={"help": "Log filename used if print statements are saved in log."}, )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False, metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        }, )
    def __post_init__(self):
        '''simple docstring'''
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.", FutureWarning, )
    def to_json_string(self):
        '''simple docstring'''
        return json.dumps(dataclasses.asdict(self), indent=2)
    @property
    def model_names(self) -> List[str]:
        '''simple docstring'''
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].")
        return self.models
    @property
    def do_multi_processing(self) -> bool:
        '''simple docstring'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 296 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    '''simple docstring'''
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    '''simple docstring'''
    value = os.environ.get(key, str(default))
    return value
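# Usage sketch (my addition): these helpers read configuration from the process
# environment, e.g.
#   os.environ["MY_FLAG"] = "yes"
#   parse_flag_from_env("MY_FLAG")            # -> True (strtobool("yes") == 1)
#   get_int_from_env(["KEY_A", "KEY_B"], 4)   # -> 4 when neither key is set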
| 296 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    '''simple docstring'''
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=1_00, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
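# How the decorator behaves (my addition, simplified): `find_executable_batch_size`
# first runs `inner_training_loop` with `starting_batch_size`; if a CUDA
# out-of-memory error escapes, it frees memory and retries with the batch size
# halved (16 -> 8 -> 4 -> ...) until the function completes.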
| 296 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    '''simple docstring'''
    models: List[str] = list_field(
        default=[], metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        }, )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"})
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
    inference: bool = field(
        default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
    cuda: bool = field(
        default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."})
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
    memory: bool = field(
        default=True, metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        }, )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True, metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        }, )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv."}, )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv."}, )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv", metadata={"help": "CSV filename used if saving environment information."}, )
    log_filename: str = field(
        default=f"log_{round(time())}.csv", metadata={"help": "Log filename used if print statements are saved in log."}, )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False, metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        }, )
    def __post_init__(self):
        '''simple docstring'''
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.", FutureWarning, )
    def to_json_string(self):
        '''simple docstring'''
        return json.dumps(dataclasses.asdict(self), indent=2)
    @property
    def model_names(self) -> List[str]:
        '''simple docstring'''
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].")
        return self.models
    @property
    def do_multi_processing(self) -> bool:
        '''simple docstring'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 296 | 1 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    '''simple docstring'''
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    '''simple docstring'''
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", """stage2.cls_token""") )
return token
def final():
    '''simple docstring'''
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
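# Note (my addition): each helper above returns (huggingface_key, original_key)
# pairs; the loop in `convert_cvt_checkpoint` below copies
# original_weights[original_key] into huggingface_weights[huggingface_key].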
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    '''simple docstring'''
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 10_00
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [1_92, 7_68, 10_24]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # Which processor attribute the original script set here is an assumption:
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_8_4,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 296 |
import math
import unittest
def is_prime(number) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
return False
return True
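# Why 6k +/- 1 suffices (my addition): write any integer as 6k + r with
# r in {0, ..., 5}; r in {0, 2, 4} gives a multiple of 2 and r == 3 a multiple
# of 3, so every prime p > 3 satisfies r in {1, 5}. Trial division therefore
# only needs the candidates 5, 7, 11, 13, ... up to sqrt(number).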
class Test(unittest.TestCase):
    '''simple docstring'''
    def test_primes(self) -> None:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self) -> None:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) ,"""Zero doesn't have any positive factors, primes must have exactly two.""" ,)
self.assertFalse(
is_prime(1 ) ,"""One only has 1 positive factor, primes must have exactly two.""" ,)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 296 | 1 |
from __future__ import annotations
class IIRFilter:
    '''simple docstring'''
    def __init__(self, order) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs, b_coeffs) -> None:
        '''simple docstring'''
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}")
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}")
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
return result
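# Usage sketch (my addition): `process` implements the direct-form difference
# equation a0*y[n] = b0*x[n] + ... + bk*x[n-k] - a1*y[n-1] - ... - ak*y[n-k].
# Coefficient values below are illustrative, not a designed filter:
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, -1.56, 0.64], [0.02, 0.04, 0.02])
#   out = [filt.process(s) for s in (0.0, 1.0, 1.0, 1.0)]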
| 296 |
import random
class Onepad:
    '''simple docstring'''
    @staticmethod
    def encrypt(text) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher, key) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
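# --- editor's note: a self-contained sketch of the scheme above. Each code
# --- point p gets a fresh key k in [1, 300]; the ciphertext is c = (p + k) * k,
# --- inverted exactly by p = (c - k*k) // k. Function names are illustrative.
def onepad_encrypt(text: str) -> tuple[list[int], list[int]]:
    cipher, key = [], []
    for p in (ord(ch) for ch in text):
        k = random.randint(1, 300)
        cipher.append((p + k) * k)
        key.append(k)
    return cipher, key

def onepad_decrypt(cipher: list[int], key: list[int]) -> str:
    return "".join(chr((c - k * k) // k) for c, k in zip(cipher, key))

# round trip: onepad_decrypt(*onepad_encrypt("Hello")) == "Hello"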
| 296 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE_ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE_ = re.compile(r"""^\s*else:""")
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if _re_test_backend.search(_SCREAMING_SNAKE_CASE ) is None:
return None
SCREAMING_SNAKE_CASE = [b[0] for b in _re_backend.findall(_SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(_SCREAMING_SNAKE_CASE )
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE = f.readlines()
SCREAMING_SNAKE_CASE = 0
while line_index < len(_SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
SCREAMING_SNAKE_CASE = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
SCREAMING_SNAKE_CASE = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ).groups()[0]
            SCREAMING_SNAKE_CASE = re.findall(r"""\[([^\]]+)\]""" , _SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
SCREAMING_SNAKE_CASE = _re_import_struct_key_value.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
SCREAMING_SNAKE_CASE = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
SCREAMING_SNAKE_CASE = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
SCREAMING_SNAKE_CASE = lines[line_index]
if _re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ) is not None:
SCREAMING_SNAKE_CASE = _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ )
SCREAMING_SNAKE_CASE = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(_SCREAMING_SNAKE_CASE ) is not None:
SCREAMING_SNAKE_CASE = _re_between_brackets.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ )
SCREAMING_SNAKE_CASE = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
SCREAMING_SNAKE_CASE = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
SCREAMING_SNAKE_CASE = []
while (
line_index < len(_SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
SCREAMING_SNAKE_CASE = lines[line_index]
SCREAMING_SNAKE_CASE = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
SCREAMING_SNAKE_CASE = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(_SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
SCREAMING_SNAKE_CASE = lines[line_index]
SCREAMING_SNAKE_CASE = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
SCREAMING_SNAKE_CASE = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
def find_duplicates(_SCREAMING_SNAKE_CASE ):
return [k for k, v in collections.Counter(_SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
SCREAMING_SNAKE_CASE = []
for key in import_dict_objects.keys():
SCREAMING_SNAKE_CASE = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
SCREAMING_SNAKE_CASE = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
SCREAMING_SNAKE_CASE = """base imports""" if key == """none""" else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __lowercase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , """__init__.py""" )
SCREAMING_SNAKE_CASE = parse_init(_SCREAMING_SNAKE_CASE )
if objects is not None:
SCREAMING_SNAKE_CASE = analyze_results(*_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("""\n""".join(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError("""\n\n""".join(_SCREAMING_SNAKE_CASE ) )
def __lowercase ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for path, directories, files in os.walk(_SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(_SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_SCREAMING_SNAKE_CASE ) / folder).glob("""*.py""" ) ) ) == 0:
continue
SCREAMING_SNAKE_CASE = str((Path(_SCREAMING_SNAKE_CASE ) / folder).relative_to(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE = short_path.replace(os.path.sep , """.""" )
submodules.append(_SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
SCREAMING_SNAKE_CASE = str((Path(_SCREAMING_SNAKE_CASE ) / fname).relative_to(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(_SCREAMING_SNAKE_CASE )
return submodules
SCREAMING_SNAKE_CASE_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def __lowercase ( ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(_SCREAMING_SNAKE_CASE , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
SCREAMING_SNAKE_CASE = spec.loader.load_module()
SCREAMING_SNAKE_CASE = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE = """\n""".join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
F"""{list_of_modules}\n"""
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
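# --- editor's note: the parser above targets the lazy-init layout used across
# --- the library, where `_import_structure` and the TYPE_CHECKING block must
# --- declare the same objects per backend. A minimal example of the layout
# --- being validated (module and object names are illustrative):
#
# _import_structure = {"configuration_foo": ["FooConfig"]}
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     pass
# else:
#     _import_structure["modeling_foo"] = ["FooModel"]
#
# if TYPE_CHECKING:
#     from .configuration_foo import FooConfig
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         from .modeling_foo import FooModel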
| 296 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = "efficientformer"
def __init__( self : Optional[int] ,lowerCamelCase__ : List[int] = [3, 2, 6, 4] ,lowerCamelCase__ : List[int] = [48, 96, 224, 448] ,lowerCamelCase__ : List[bool] = [True, True, True, True] ,lowerCamelCase__ : int = 448 ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : int = 7 ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : str = "gelu" ,lowerCamelCase__ : float = 0.02 ,lowerCamelCase__ : float = 1e-1_2 ,lowerCamelCase__ : int = 224 ,lowerCamelCase__ : float = 1e-0_5 ,**lowerCamelCase__ : str ,) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_expansion_ratio
SCREAMING_SNAKE_CASE = downsamples
SCREAMING_SNAKE_CASE = dim
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = resolution
SCREAMING_SNAKE_CASE = pool_size
SCREAMING_SNAKE_CASE = downsample_patch_size
SCREAMING_SNAKE_CASE = downsample_stride
SCREAMING_SNAKE_CASE = downsample_pad
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = num_metaad_blocks
SCREAMING_SNAKE_CASE = distillation
SCREAMING_SNAKE_CASE = use_layer_scale
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = batch_norm_eps
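# --- editor's note: hedged usage sketch. Upstream this class is
# --- `EfficientFormerConfig`; judging from the defaults, the leading positional
# --- arguments appear to correspond to depths, hidden_sizes, downsamples, and
# --- dim, so a typical call would be roughly:
#
# config = EfficientFormerConfig(
#     depths=[3, 2, 6, 4],
#     hidden_sizes=[48, 96, 224, 448],
#     downsamples=[True, True, True, True],
# )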
| 296 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE_ = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def __lowercase ( _SCREAMING_SNAKE_CASE=None ) -> int:
'''simple docstring'''
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser("""tpu-config""" , description=_description )
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
SCREAMING_SNAKE_CASE = parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=_SCREAMING_SNAKE_CASE , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=_SCREAMING_SNAKE_CASE , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
SCREAMING_SNAKE_CASE = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=_SCREAMING_SNAKE_CASE , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
SCREAMING_SNAKE_CASE = defaults.command_file
if not args.command and defaults.commands is not None:
SCREAMING_SNAKE_CASE = defaults.commands
if not args.tpu_name:
SCREAMING_SNAKE_CASE = defaults.tpu_name
if not args.tpu_zone:
SCREAMING_SNAKE_CASE = defaults.tpu_zone
if args.accelerate_version == "dev":
SCREAMING_SNAKE_CASE = """git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
SCREAMING_SNAKE_CASE = """accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
SCREAMING_SNAKE_CASE = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
SCREAMING_SNAKE_CASE = ["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
SCREAMING_SNAKE_CASE = """; """.join(_SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
SCREAMING_SNAKE_CASE = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {' '.join(_SCREAMING_SNAKE_CASE )}""" )
return
subprocess.run(_SCREAMING_SNAKE_CASE )
print("""Successfully setup pod.""" )
def __lowercase ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tpu_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
tpu_command_launcher(_SCREAMING_SNAKE_CASE )
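# --- editor's note: for reference, the gcloud invocation assembled above looks
# --- roughly like this for a pod named "my-tpu" in zone "us-central2-b"
# --- (names and commands are illustrative):
#
# gcloud compute tpus tpu-vm ssh my-tpu --zone us-central2-b \
#     --command "cd /usr/share; pip install accelerate -U; python train.py" \
#     --worker all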
| 296 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
SCREAMING_SNAKE_CASE_ = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
SCREAMING_SNAKE_CASE_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if "://" in dataset_path:
SCREAMING_SNAKE_CASE = dataset_path.split("""://""" )[1]
return dataset_path
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = not is_remote_filesystem(_SCREAMING_SNAKE_CASE )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_SCREAMING_SNAKE_CASE ) , fs._strip_protocol(_SCREAMING_SNAKE_CASE ) )
else:
fs.mv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , recursive=_SCREAMING_SNAKE_CASE )
def __lowercase ( ) -> None:
'''simple docstring'''
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = threading.Lock()
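# --- editor's note: a quick illustration of the two path helpers above
# --- (upstream names: `extract_path_from_uri` and `is_remote_filesystem`):
#
# extract_path_from_uri("s3://bucket/dir/train")   # -> "bucket/dir/train"
# extract_path_from_uri("/local/dir/train")        # -> "/local/dir/train"
# is_remote_filesystem(fsspec.filesystem("file"))  # -> False (protocol == "file")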
| 296 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[Any]=14 ,lowerCamelCase__ : str=7 ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=True ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Tuple=99 ,lowerCamelCase__ : Union[str, Any]=32 ,lowerCamelCase__ : Optional[int]=5 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : Tuple=37 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Union[str, Any]=512 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Optional[Any]=0.02 ,lowerCamelCase__ : int=3 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : List[str]=None ,) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = use_mc_token_ids
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
SCREAMING_SNAKE_CASE = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : str ,*lowerCamelCase__ : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = CTRLModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,head_mask=lowerCamelCase__ )
model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,*lowerCamelCase__ : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = CTRLLMHeadModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
),
) = config_and_inputs
SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[Any] ,*lowerCamelCase__ : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = CTRLForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : int = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__snake_case : Optional[int] = (CTRLLMHeadModel,) if is_torch_available() else ()
__snake_case : List[Any] = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : List[Any] = True
__snake_case : Any = False
__snake_case : int = False
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = CTRLModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any:
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = CTRLModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
'''simple docstring'''
pass
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[11859, 0, 1611, 8]] ,dtype=torch.long ,device=lowerCamelCase__ ) # Legal the president is
SCREAMING_SNAKE_CASE = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase__ ,do_sample=lowerCamelCase__ )
self.assertListEqual(output_ids[0].tolist() ,lowerCamelCase__ )
| 296 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256"""
SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 296 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" )
os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,lowerCamelCase__ )
# Wrong model type will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""model""" ,lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""bert""" ,lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = "new-model"
try:
AutoConfig.register("""new-model""" ,lowerCamelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
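# --- editor's note: the register-then-reload pattern exercised above, in
# --- isolation (a sketch; `CustomConfig` comes from the test fixtures):
#
# AutoConfig.register("custom", CustomConfig)
# with tempfile.TemporaryDirectory() as tmp_dir:
#     CustomConfig().save_pretrained(tmp_dir)
#     config = AutoConfig.from_pretrained(tmp_dir)
# assert isinstance(config, CustomConfig)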
| 296 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Callable ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[dict] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]:
'''simple docstring'''
super().__init__(
features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = Generator(
cache_dir=lowerCamelCase__ ,features=lowerCamelCase__ ,generator=lowerCamelCase__ ,gen_kwargs=lowerCamelCase__ ,**lowerCamelCase__ ,)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,)
SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split="""train""" ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory )
return dataset
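# --- editor's note: this reader backs the public `Dataset.from_generator` API;
# --- a typical end-user call (not part of this module) looks like:
#
# from datasets import Dataset
# def gen():
#     for i in range(3):
#         yield {"text": f"sample {i}"}
# ds = Dataset.from_generator(gen)  # builds a map-style dataset via this class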
| 296 | 1 |
import os
import string
import sys
SCREAMING_SNAKE_CASE_ = 1 << 8
SCREAMING_SNAKE_CASE_ = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 2_7,
"""up""": 6_5 + ARROW_KEY_FLAG,
"""down""": 6_6 + ARROW_KEY_FLAG,
"""right""": 6_7 + ARROW_KEY_FLAG,
"""left""": 6_8 + ARROW_KEY_FLAG,
"""mod_int""": 9_1,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 5_0,
"""delete""": 5_1,
"""pg_up""": 5_3,
"""pg_down""": 5_4,
}
SCREAMING_SNAKE_CASE_ = KEYMAP["""up"""]
SCREAMING_SNAKE_CASE_ = KEYMAP["""left"""]
if sys.platform == "win32":
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(1_0):
SCREAMING_SNAKE_CASE_ = ord(str(i))
def __lowercase ( ) -> Tuple:
'''simple docstring'''
if os.name == "nt":
import msvcrt
SCREAMING_SNAKE_CASE = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_SCREAMING_SNAKE_CASE ) == 0:
# Read the keystroke
SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(_SCREAMING_SNAKE_CASE )
if ord(_SCREAMING_SNAKE_CASE ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_26 ) )
SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
except KeyError:
SCREAMING_SNAKE_CASE = cha[1]
else:
SCREAMING_SNAKE_CASE = ch.decode(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
SCREAMING_SNAKE_CASE = sys.stdin.fileno()
SCREAMING_SNAKE_CASE = termios.tcgetattr(_SCREAMING_SNAKE_CASE )
try:
tty.setraw(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(_SCREAMING_SNAKE_CASE , termios.TCSADRAIN , _SCREAMING_SNAKE_CASE )
return ch
def __lowercase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(_SCREAMING_SNAKE_CASE ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_SCREAMING_SNAKE_CASE ) == KEYMAP["esc"]:
SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(_SCREAMING_SNAKE_CASE ) == KEYMAP["mod_int"]:
SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(_SCREAMING_SNAKE_CASE ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_SCREAMING_SNAKE_CASE ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_SCREAMING_SNAKE_CASE ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
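# --- editor's note: the second helper decodes one keypress at a time, folding
# --- ARROW_KEY_FLAG into arrow keys. A sketch of the intended polling loop
# --- (upstream the two helpers are named `get_raw_chars` and `get_character`):
#
# while True:
#     key = get_character()
#     if key == chr(KEYMAP["interrupt"]):
#         break  # Ctrl-C
#     if ord(key) == KEYMAP["up"]:
#         move_selection(-1)  # illustrative handler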
| 296 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
__snake_case : Optional[str] = field(
default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 296 | 1 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["image_processor", "tokenizer"]
__snake_case : str = "AutoImageProcessor"
__snake_case : Optional[int] = "AutoTokenizer"
def __init__( self : Tuple ,lowerCamelCase__ : List[Any]=None ,lowerCamelCase__ : Optional[int]=None ,**lowerCamelCase__ : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase__ ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.image_processor
SCREAMING_SNAKE_CASE = False
def __call__( self : Union[str, Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__ ,**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = kwargs.pop("""images""" ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = kwargs.pop("""text""" ,lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
SCREAMING_SNAKE_CASE = args[0]
SCREAMING_SNAKE_CASE = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
SCREAMING_SNAKE_CASE = self.image_processor(lowerCamelCase__ ,*lowerCamelCase__ ,**lowerCamelCase__ )
if text is not None:
SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase__ ,**lowerCamelCase__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE = encodings["""input_ids"""]
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Any ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.tokenizer
yield
SCREAMING_SNAKE_CASE = self.image_processor
SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Union[str, Any]=False ,lowerCamelCase__ : Optional[int]=None ) -> List[str]:
'''simple docstring'''
if added_vocab is None:
SCREAMING_SNAKE_CASE = self.tokenizer.get_added_vocab()
SCREAMING_SNAKE_CASE = {}
while tokens:
SCREAMING_SNAKE_CASE = re.search(R"""<s_(.*?)>""" ,lowerCamelCase__ ,re.IGNORECASE )
if start_token is None:
break
SCREAMING_SNAKE_CASE = start_token.group(1 )
SCREAMING_SNAKE_CASE = re.search(RF"""</s_{key}>""" ,lowerCamelCase__ ,re.IGNORECASE )
SCREAMING_SNAKE_CASE = start_token.group()
if end_token is None:
SCREAMING_SNAKE_CASE = tokens.replace(lowerCamelCase__ ,"""""" )
else:
SCREAMING_SNAKE_CASE = end_token.group()
SCREAMING_SNAKE_CASE = re.escape(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = re.escape(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" ,lowerCamelCase__ ,re.IGNORECASE )
if content is not None:
SCREAMING_SNAKE_CASE = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
SCREAMING_SNAKE_CASE = self.tokenajson(lowerCamelCase__ ,is_inner_value=lowerCamelCase__ ,added_vocab=lowerCamelCase__ )
if value:
if len(lowerCamelCase__ ) == 1:
SCREAMING_SNAKE_CASE = value[0]
SCREAMING_SNAKE_CASE = value
else: # leaf nodes
SCREAMING_SNAKE_CASE = []
for leaf in content.split(R"""<sep/>""" ):
SCREAMING_SNAKE_CASE = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
SCREAMING_SNAKE_CASE = leaf[1:-2] # for categorical special tokens
output[key].append(lowerCamelCase__ )
if len(output[key] ) == 1:
SCREAMING_SNAKE_CASE = output[key][0]
SCREAMING_SNAKE_CASE = tokens[tokens.find(lowerCamelCase__ ) + len(lowerCamelCase__ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] ,is_inner_value=lowerCamelCase__ ,added_vocab=lowerCamelCase__ )
if len(lowerCamelCase__ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,lowerCamelCase__ ,)
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,lowerCamelCase__ ,)
return self.image_processor
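# --- editor's note: the token-to-JSON parser above turns Donut-style tag
# --- sequences into nested structures; an illustrative input/output pair
# --- (values made up for the example):
#
# "<s_menu><s_name>Latte</s_name><s_price>$5</s_price></s_menu>"
#   -> {"menu": {"name": "Latte", "price": "$5"}}
# leaf values separated by <sep/>, e.g. "a<sep/>b", become ["a", "b"]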
| 296 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : List[str] = TextToVideoSDPipeline
__snake_case : int = TEXT_TO_IMAGE_PARAMS
__snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__snake_case : Dict = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=0 ) -> List[Any]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """np"""
SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase__ ).frames
SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
SCREAMING_SNAKE_CASE = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=25 ,output_type="""pt""" ).frames
SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""pt""" ).frames
SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
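# Note: both slow tests above compare generated frames against a reference
# video loaded from the Hub and require a mean absolute error below 5e-2; the
# loose tolerance keeps the check robust to small cross-hardware numerical
# differences.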
| 296 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Callable ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[dict] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]:
'''simple docstring'''
super().__init__(
features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = Generator(
cache_dir=lowerCamelCase__ ,features=lowerCamelCase__ ,generator=lowerCamelCase__ ,gen_kwargs=lowerCamelCase__ ,**lowerCamelCase__ ,)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,)
SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split="""train""" ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory )
return dataset
| 296 |
def __lowercase ( _SCREAMING_SNAKE_CASE = 10 ) -> str:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or n < 0:
raise ValueError("""Invalid input""" )
SCREAMING_SNAKE_CASE = 10**n
SCREAMING_SNAKE_CASE = 28433 * (pow(2 , 7830457 , _SCREAMING_SNAKE_CASE )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
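# Why this stays fast: pow(base, exp, mod) performs modular exponentiation, so
# the huge power 2**7830457 is never materialised. Small illustrative check:
#   pow(2, 10, 1000) == 24   # 2**10 = 1024, and 1024 % 1000 = 24
# solution(10) therefore returns the last ten digits of 28433 * 2**7830457 + 1.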
| 296 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : ArgumentParser ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" ,action="""store_true""" ,help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" ,action="""store_true""" ,help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" ,)
download_parser.add_argument("""model""" ,type=lowerCamelCase__ ,help="""Name of the model to download""" )
download_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : str ,lowerCamelCase__ : bool ,lowerCamelCase__ : bool ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = model
SCREAMING_SNAKE_CASE = cache
SCREAMING_SNAKE_CASE = force
SCREAMING_SNAKE_CASE = trust_remote_code
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
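# Illustrative CLI usage for the command registered above (invoked through the
# transformers-cli entry point; the model name and flags are just examples):
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models --force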
| 296 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
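# Sketch of the behaviour this lazy structure gives callers: importing the
# package is cheap, and each heavy submodule is loaded only on first access.
#   from transformers.models import llama   # fast; nothing heavy imported yet
#   llama.LlamaConfig                        # pulls in configuration_llama
#   llama.LlamaModel                         # pulls in modeling_llama (needs torch)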
| 296 | 1 |
def __lowercase ( _SCREAMING_SNAKE_CASE = 1_00_00_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = {1: 1}
for inputa in range(2 , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
SCREAMING_SNAKE_CASE = (3 * number) + 1
counter += 1
if inputa not in counters:
SCREAMING_SNAKE_CASE = counter
if counter > pre_counter:
SCREAMING_SNAKE_CASE = inputa
SCREAMING_SNAKE_CASE = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
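# Worked example of the memoised counting above: the chain for 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, so the stored count for
# 13 is 10 (nine steps plus the cached count of 1 for the terminal value 1).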
| 296 |
from pathlib import Path
import fire
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE )
dest_dir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
for path in src_dir.iterdir():
SCREAMING_SNAKE_CASE = [x.rstrip() for x in list(path.open().readlines() )][:n]
SCREAMING_SNAKE_CASE = dest_dir.joinpath(path.name )
print(_SCREAMING_SNAKE_CASE )
dest_path.open("""w""" ).write("""\n""".join(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
fire.Fire(minify)
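# Example invocation via python-fire (assuming this script is saved as
# minify.py; positional arguments follow minify's signature: src_dir, dest_dir, n):
#   python minify.py ./raw_data ./mini_data 100
# which writes the first 100 lines of every file in ./raw_data into ./mini_data.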
| 296 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = "xlm-prophetnet"
__snake_case : Dict = ["past_key_values"]
__snake_case : int = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self : List[str] ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[Union[str, Callable]] = "gelu" ,lowerCamelCase__ : Optional[int] = 30522 ,lowerCamelCase__ : Optional[int] = 1024 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[int] = 4096 ,lowerCamelCase__ : Optional[int] = 12 ,lowerCamelCase__ : Optional[int] = 16 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[float] = 0.1 ,lowerCamelCase__ : Optional[int] = 512 ,lowerCamelCase__ : Optional[float] = 0.02 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 2 ,lowerCamelCase__ : Optional[int] = 32 ,lowerCamelCase__ : Optional[int] = 128 ,lowerCamelCase__ : Optional[bool] = False ,lowerCamelCase__ : Optional[float] = 0.0 ,lowerCamelCase__ : Optional[bool] = True ,lowerCamelCase__ : Optional[int] = 0 ,lowerCamelCase__ : Optional[int] = 1 ,lowerCamelCase__ : Optional[int] = 2 ,**lowerCamelCase__ : Dict ,) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = encoder_ffn_dim
SCREAMING_SNAKE_CASE = num_encoder_layers
SCREAMING_SNAKE_CASE = num_encoder_attention_heads
SCREAMING_SNAKE_CASE = decoder_ffn_dim
SCREAMING_SNAKE_CASE = num_decoder_layers
SCREAMING_SNAKE_CASE = num_decoder_attention_heads
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = init_std # Normal(0, this parameter)
SCREAMING_SNAKE_CASE = activation_function
# parameters for xlmprophetnet
SCREAMING_SNAKE_CASE = ngram
SCREAMING_SNAKE_CASE = num_buckets
SCREAMING_SNAKE_CASE = relative_max_distance
SCREAMING_SNAKE_CASE = disable_ngram_loss
SCREAMING_SNAKE_CASE = eps
# 3 Types of Dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,add_cross_attention=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : str ) -> Dict:
'''simple docstring'''
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 296 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE_ = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""fake-roberta""" )
os.makedirs(lowerCamelCase__ ,exist_ok=lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ ,"""config.json""" ) ,"""w""" ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(type(lowerCamelCase__ ) ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,lowerCamelCase__ )
# Wrong model type will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""model""" ,lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoConfig.register("""bert""" ,lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase__ ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = "new-model"
try:
AutoConfig.register("""new-model""" ,lowerCamelCase__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=lowerCamelCase__ )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 296 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = "convbert"
def __init__( self : Union[str, Any] ,lowerCamelCase__ : List[Any]=30522 ,lowerCamelCase__ : Optional[Any]=768 ,lowerCamelCase__ : Union[str, Any]=12 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Any="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Dict=512 ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Any=0.02 ,lowerCamelCase__ : Optional[int]=1e-12 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Union[str, Any]=0 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : Union[str, Any]=768 ,lowerCamelCase__ : Any=2 ,lowerCamelCase__ : Optional[Any]=9 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : Any=None ,**lowerCamelCase__ : Union[str, Any] ,) -> str:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = head_ratio
SCREAMING_SNAKE_CASE = conv_kernel_size
SCREAMING_SNAKE_CASE = num_groups
SCREAMING_SNAKE_CASE = classifier_dropout
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
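# Minimal sketch of what the inputs property above yields for any task other
# than multiple-choice (mirroring the real transformers ConvBertOnnxConfig this
# fragment is based on):
#   OrderedDict([
#       ("input_ids", {0: "batch", 1: "sequence"}),
#       ("attention_mask", {0: "batch", 1: "sequence"}),
#       ("token_type_ids", {0: "batch", 1: "sequence"}),
#   ])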
| 296 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git_vision_model"
def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = "git"
def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-12 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = num_image_with_embedding
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
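# Usage sketch for the serialisation above (class names follow the real
# transformers GitConfig / GitVisionConfig that this fragment mirrors):
#   config = GitConfig(vision_config={"hidden_size": 384})
#   d = config.to_dict()
#   assert isinstance(d["vision_config"], dict)   # nested config becomes a dict
#   assert d["model_type"] == "git"               # set from the class attribute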
| 296 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
SCREAMING_SNAKE_CASE_ = importlib.util.find_spec("""s3fs""") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
SCREAMING_SNAKE_CASE_ = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if "://" in dataset_path:
SCREAMING_SNAKE_CASE = dataset_path.split("""://""" )[1]
return dataset_path
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = not is_remote_filesystem(_SCREAMING_SNAKE_CASE )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_SCREAMING_SNAKE_CASE ) , fs._strip_protocol(_SCREAMING_SNAKE_CASE ) )
else:
fs.mv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , recursive=_SCREAMING_SNAKE_CASE )
def __lowercase ( ) -> None:
'''simple docstring'''
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = threading.Lock()
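# Illustrative behaviour of the helpers above (function names here are
# descriptive stand-ins, since this fragment renames them all to __lowercase):
#   extract_path_from_uri("s3://bucket/train")       -> "bucket/train"
#   extract_path_from_uri("/local/train")            -> "/local/train"  (no "://")
#   is_remote_filesystem(fsspec.filesystem("file"))  -> False  (protocol "file")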
| 296 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ = random.Random()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
if rng is None:
SCREAMING_SNAKE_CASE = global_rng
SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
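# e.g. floats_list((2, 3)) returns two lists of three floats drawn from
# [0.0, 1.0) (scale defaults to 1.0); pass an explicit rng for reproducibility.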
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = min_seq_length
SCREAMING_SNAKE_CASE = max_seq_length
SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE = spectrogram_length
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = num_audio_channels
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = chunk_length
SCREAMING_SNAKE_CASE = sampling_rate
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str:
'''simple docstring'''
def _flatten(lowerCamelCase__ : List[Any] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : List[Any] = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE = feature_extractor(
lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape ,(1, 1, 192, 128) )
SCREAMING_SNAKE_CASE = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase__ ,atol=1e-4 ) )
| 296 | 1 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Union[str, Any]=2 ,lowerCamelCase__ : List[str]=8 ,lowerCamelCase__ : str=True ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : List[Any]=99 ,lowerCamelCase__ : List[str]=16 ,lowerCamelCase__ : List[Any]=5 ,lowerCamelCase__ : int=2 ,lowerCamelCase__ : Any=36 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Any=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : Tuple=512 ,lowerCamelCase__ : Tuple=16 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : List[Any]=4 ,lowerCamelCase__ : Dict=None ,) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_config()
SCREAMING_SNAKE_CASE = 300
return config
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
'''simple docstring'''
( SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE ) = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MraModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Any ,lowerCamelCase__ : Dict ,) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = MraModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,encoder_attention_mask=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MraForMaskedLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,start_positions=lowerCamelCase__ ,end_positions=lowerCamelCase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MraForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MraForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
( SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE ) = config_and_inputs
SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Dict = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : str = False
__snake_case : Union[str, Any] = False
__snake_case : Optional[Any] = False
__snake_case : Optional[Any] = False
__snake_case : List[str] = ()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MraModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = MraModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason="""MRA does not output attentions""" )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
return
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 256, 768) )
self.assertEqual(output.shape ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = 50265
SCREAMING_SNAKE_CASE = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
SCREAMING_SNAKE_CASE = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = 50265
SCREAMING_SNAKE_CASE = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape ,lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
| 296 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
SCREAMING_SNAKE_CASE = [144, 192, 240]
SCREAMING_SNAKE_CASE = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
SCREAMING_SNAKE_CASE = [96, 120, 144]
SCREAMING_SNAKE_CASE = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
SCREAMING_SNAKE_CASE = [64, 80, 96]
SCREAMING_SNAKE_CASE = [16, 16, 24, 48, 64, 80, 320]
SCREAMING_SNAKE_CASE = 0.05
SCREAMING_SNAKE_CASE = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
SCREAMING_SNAKE_CASE = 512
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = 21
SCREAMING_SNAKE_CASE = """pascal-voc-id2label.json"""
else:
SCREAMING_SNAKE_CASE = 1000
SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE = """huggingface/label-files"""
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE = {int(k): v for k, v in id2label.items()}
SCREAMING_SNAKE_CASE = id2label
SCREAMING_SNAKE_CASE = {v: k for k, v in id2label.items()}
return config
def rename_key(name, base_model=False ) -> str:
    '''simple docstring'''
    for i in range(1 , 6 ):
        if F"""layer_{i}.""" in name:
            name = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
    if "conv_1." in name:
        name = name.replace("""conv_1.""" , """conv_stem.""" )
    if ".block." in name:
        name = name.replace(""".block.""" , """.""" )
    if "exp_1x1" in name:
        name = name.replace("""exp_1x1""" , """expand_1x1""" )
    if "red_1x1" in name:
        name = name.replace("""red_1x1""" , """reduce_1x1""" )
    if ".local_rep.conv_3x3." in name:
        name = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
    if ".local_rep.conv_1x1." in name:
        name = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
    if ".norm." in name:
        name = name.replace(""".norm.""" , """.normalization.""" )
    if ".conv." in name:
        name = name.replace(""".conv.""" , """.convolution.""" )
    if ".conv_proj." in name:
        name = name.replace(""".conv_proj.""" , """.conv_projection.""" )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
    if "expand_1x1" in name:
        name = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
    if "conv_3x3" in name:
        name = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
    if "reduce_1x1" in name:
        name = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
    for i in range(2 , 5 ):
        if F""".global_rep.{i}.weight""" in name:
            name = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
        if F""".global_rep.{i}.bias""" in name:
            name = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
    if ".global_rep." in name:
        name = name.replace(""".global_rep.""" , """.transformer.""" )
    if ".pre_norm_mha.0." in name:
        name = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
    if ".pre_norm_ffn.0." in name:
        name = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
    if ".pre_norm_ffn.1." in name:
        name = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
    if ".pre_norm_ffn.4." in name:
        name = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
    if ".transformer." in name:
        name = name.replace(""".transformer.""" , """.transformer.layer.""" )
    if ".aspp_layer." in name:
        name = name.replace(""".aspp_layer.""" , """.""" )
    if ".aspp_pool." in name:
        name = name.replace(""".aspp_pool.""" , """.""" )
    if "seg_head." in name:
        name = name.replace("""seg_head.""" , """segmentation_head.""" )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
    if "classifier.fc." in name:
        name = name.replace("""classifier.fc.""" , """classifier.""" )
    elif (not base_model) and ("segmentation_head." not in name):
        name = """mobilevit.""" + name
    return name
def convert_state_dict(orig_state_dict, model, base_model=False ) -> Dict:
    '''simple docstring'''
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevit."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[:dim]
                orig_state_dict[prefix + """key.bias"""] = val[dim : dim * 2]
                orig_state_dict[prefix + """value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(checkpoint , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }
        print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
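    # Editor's note: a minimal, self-contained sketch of the fused-QKV split that
    # convert_state_dict performs above; the 3*dim row layout (query, key, value)
    # is an assumption based on the usual ViT-style attention packing.
    fused = torch.randn(3 * 8 , 8 )  # toy fused qkv weight with dim = 8
    dim = 8
    query_w , key_w , value_w = fused[:dim, :] , fused[dim : dim * 2, :] , fused[-dim:, :]
    assert query_w.shape == key_w.shape == value_w.shape == (8, 8)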
| 296 | 1 |
import numpy as np
class Cell:
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__( self , cell ):
        '''simple docstring'''
        return self.position == cell.position
    def showcell( self ):
        '''simple docstring'''
        print(self.position )
class Gridworld:
    '''simple docstring'''
    def __init__( self , world_size=(5, 5) ):
        '''simple docstring'''
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show( self ):
        '''simple docstring'''
        print(self.w )
    def get_neighbours( self , cell ):
        '''simple docstring'''
        neighbour_cords = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cords:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar(world, start, goal ):
    '''simple docstring'''
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neighbours(current ):
            # skip neighbours that were already expanded
            if any(c == n for c in _closed ):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # heuristic: squared Euclidean distance to the goal
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # skip neighbours already queued with a better f-score
            if any(c == n and c.f < n.f for c in _open ):
                continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = Gridworld()
# Start position and goal
SCREAMING_SNAKE_CASE_ = Cell()
SCREAMING_SNAKE_CASE_ = (0, 0)
SCREAMING_SNAKE_CASE_ = Cell()
SCREAMING_SNAKE_CASE_ = (4, 4)
print(F'''path from {start.position} to {goal.position}''')
SCREAMING_SNAKE_CASE_ = astar(world, start, goal)
# Just for visual reasons.
for i in s:
SCREAMING_SNAKE_CASE_ = 1
print(world.w)
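    # Editor's note: the open list above is scanned with np.argmin on every
    # iteration, which is O(n) per pop. A common alternative keeps the open set
    # in a binary heap; this small sketch (names illustrative) pops the lowest
    # f-score in O(log n).
    import heapq

    open_heap = []  # entries are (f, tie_breaker, cell) so cells never compare
    heapq.heappush(open_heap , (0, 0, start) )
    best_f , _ , best_cell = heapq.heappop(open_heap )
    assert best_cell is start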
| 296 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image classification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class FlaxAutoModelForCausalLM(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="""vision-to-text modeling""")
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
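# Editor's note: a hedged usage sketch for the auto classes defined above. The
# checkpoint name is only an example, and from_pretrained downloads weights, so
# the call is kept behind a main guard.
if __name__ == "__main__":
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    # from_pretrained reads the checkpoint config, looks its model_type up in
    # FLAX_MODEL_MAPPING, and instantiates the matching class (FlaxBertModel).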
| 296 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline(Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
        requires_backends(self , """vision""" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["""threshold"""] = kwargs["""threshold"""]
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="""pt""" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
        inputs["""target_size"""] = target_size
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop("""target_size""" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"""target_size""": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["""bbox"""] = model_inputs["""bbox"""]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        '''simple docstring'''
        target_size = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores, classes = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
            keys = ["""score""", """label""", """box"""]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["""scores"""]
            labels = raw_annotation["""labels"""]
            boxes = raw_annotation["""boxes"""]
            raw_annotation["""scores"""] = scores.tolist()
            raw_annotation["""labels"""] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["""boxes"""] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["""score""", """label""", """box"""]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
            ]
        return annotation
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : "torch.Tensor" ) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
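# Editor's note: a hedged usage sketch of this pipeline through the high-level
# factory; the checkpoint is an example and running this downloads weights.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection" , model="facebook/detr-resnet-50" )
    print(detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.9 ) )
    # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]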
| 296 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class TableTransformerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        '''simple docstring'''
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-5
    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 12
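# Editor's note: a minimal sketch of instantiating the config above; the model
# class name is the upstream TableTransformerForObjectDetection and the small
# hyper-parameters are arbitrary.
if __name__ == "__main__":
    from transformers import TableTransformerForObjectDetection

    config = TableTransformerConfig(num_queries=50 , d_model=128 , encoder_layers=2 , decoder_layers=2 )
    model = TableTransformerForObjectDetection(config )  # randomly initialised toy variant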
| 296 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
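# Editor's note: a rough sketch of the idea behind _LazyModule. With PEP 562 a
# package can resolve attributes on first access instead of importing every
# heavy submodule up front; the names below are illustrative only.
#
#     def __getattr__(name):  # module-level hook, called for missing attributes
#         submodule = _name_to_module[name]  # e.g. "modeling_gpt_neo"
#         module = importlib.import_module("." + submodule, __package__)
#         return getattr(module, name)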
| 296 |
from collections import defaultdict
from math import gcd
def solution(limit = 1_500_000 ) -> int:
    '''simple docstring'''
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
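    # Editor's note: a worked example of the Euclid parametrisation used above.
    # With m = 2, n = 1 (coprime, opposite parity) the primitive triple is
    # (m**2 - n**2, 2*m*n, m**2 + n**2) = (3, 4, 5), whose perimeter
    # 2*m*(m + n) = 12 is exactly the step used by the inner loop.
    m, n = 2, 1
    assert (m * m - n * n, 2 * m * n, m * m + n * n) == (3, 4, 5)
    assert 2 * m * (m + n) == 3 + 4 + 5 == 12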
| 296 | 1 |
def permute(nums ) -> list[list[int]]:
    '''simple docstring'''
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permute2(nums ):
    '''simple docstring'''
    def backtrack(start ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1 )
                nums[start], nums[i] = nums[i], nums[start]  # backtrack
    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
SCREAMING_SNAKE_CASE_ = permutea([1, 2, 3])
print(res)
doctest.testmod()
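    # Editor's note: both functions return all 3! = 6 orderings, although not in
    # the same order, e.g. permute([1, 2, 3])[0] == [3, 2, 1].
    assert sorted(permute([1, 2, 3] ) ) == sorted(permute2([1, 2, 3] ) )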
| 296 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    '''simple docstring'''
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
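    # Editor's note: a minimal sketch of the subcommand wiring used in main(),
    # with a hypothetical "env" command registered the way
    # EnvironmentCommand.register_subcommand presumably does:
    #     parser = ArgumentParser("demo-cli")
    #     subparsers = parser.add_subparsers()
    #     env_parser = subparsers.add_parser("env", help="print environment info")
    #     env_parser.set_defaults(func=lambda args: print("env info"))
    #     args = parser.parse_args(["env"]); args.func(args)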
| 296 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool ):
'''simple docstring'''
__snake_case : str = "Salesforce/blip-image-captioning-base"
__snake_case : Optional[int] = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
__snake_case : Union[str, Any] = "image_captioner"
__snake_case : Optional[Any] = AutoModelForVisionaSeq
__snake_case : str = ["image"]
__snake_case : str = ["text"]
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" ):
        '''simple docstring'''
        return self.pre_processor(images=image , return_tensors="""pt""" )
    def forward( self , inputs ):
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode( self , outputs ) -> List[str]:
        '''simple docstring'''
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
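# Editor's note: a hedged usage sketch; PipelineTool instances are callable and
# download their checkpoint lazily, so the demo sits behind a main guard.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageCaptioningTool()
    print(tool(Image.new("""RGB""" , (64, 64) ) ) )  # caption of a blank image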
| 296 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name ):
    '''simple docstring'''
    config = SwinConfig()
    name_split = swin_name.split("""_""" )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name ):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
    if "layers" in name:
        name = """encoder.""" + name
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""
    if "head" in name:
        name = name.replace("""head""" , """classifier""" )
    else:
        name = """swin.""" + name
    return name
def convert_state_dict(orig_state_dict, model ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self."""
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[
                    :dim
                ]
                orig_state_dict[prefix + """key.bias"""] = val[
                    dim : dim * 2
                ]
                orig_state_dict[prefix + """value.bias"""] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path ):
    '''simple docstring'''
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image_processor = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    timm_outs = timm_model(inputs["""pixel_values"""] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1E-3 )
    print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
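    # Editor's note: typical invocation of this conversion script (the file
    # name is illustrative):
    #     python convert_swin_timm_to_pytorch.py \
    #         --swin_name swin_tiny_patch4_window7_224 \
    #         --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224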
| 296 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("""add_bos_token""" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self , conversation: "Conversation" ) -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
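# Editor's note: a hedged usage sketch; from_pretrained fetches the "gpt2"
# files listed in the URL maps above.
if __name__ == "__main__":
    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
    print(tokenizer("Hello world")["input_ids"])  # -> [15496, 995]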
| 296 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default ):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no" ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
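# Editor's note: example usage of the helpers above; the variable names are
# illustrative only.
if __name__ == "__main__":
    os.environ["DEMO_DEBUG"] = "yes"
    assert parse_flag_from_env("DEMO_DEBUG") is True  # strtobool("yes") == 1
    assert get_int_from_env(["DEMO_WORKERS"], 4) == 4  # unset variable -> default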
| 296 | 1 |
def solution(n = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'''{solution() = }''')
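    # Editor's note: a worked check for n = 10, the classic example: the sum of
    # squares is 385, the square of the sum is 55**2 = 3025, difference 2640.
    assert solution(10) == 2640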
| 296 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BenchmarkArguments:
    '''simple docstring'''
    models: List[str] = list_field(
        default=[] , metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        } , )
    batch_sizes: List[int] = list_field(
        default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
    inference: bool = field(
        default=True , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
    cuda: bool = field(
        default=True , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
    tpu: bool = field(
        default=True , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
    fp16: bool = field(default=False , metadata={"help": "Use FP16 to accelerate inference."} )
    training: bool = field(default=False , metadata={"help": "Benchmark training of model"} )
    verbose: bool = field(default=False , metadata={"help": "Verbose memory tracing"} )
    speed: bool = field(
        default=True , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
    memory: bool = field(
        default=True , metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        } , )
    trace_memory_line_by_line: bool = field(default=False , metadata={"help": "Trace memory line by line"} )
    save_to_csv: bool = field(default=False , metadata={"help": "Save result to a CSV file"} )
    log_print: bool = field(default=False , metadata={"help": "Save all print statements in a log file"} )
    env_print: bool = field(default=False , metadata={"help": "Whether to print environment information"} )
    multi_process: bool = field(
        default=True , metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        } , )
    inference_time_csv_file: str = field(
        default=F"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , )
    inference_memory_csv_file: str = field(
        default=F"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
    train_time_csv_file: str = field(
        default=F"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
    train_memory_csv_file: str = field(
        default=F"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
    env_info_csv_file: str = field(
        default=F"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , )
    log_filename: str = field(
        default=F"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , )
    repeat: int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
    only_pretrain_model: bool = field(
        default=False , metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        warnings.warn(
            F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""" , FutureWarning , )
    def to_json_string( self ):
        '''simple docstring'''
        return json.dumps(dataclasses.asdict(self ) , indent=2 )
    @property
    def model_names( self ) -> List[str]:
        '''simple docstring'''
        if len(self.models ) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""" )
        return self.models
    @property
    def do_multi_processing( self ):
        '''simple docstring'''
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""" )
            return False
        else:
            return True
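# Editor's note: a hedged sketch of filling these arguments from the command
# line with HfArgumentParser; the flag value is an example.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(BenchmarkArguments)
    (benchmark_args,) = parser.parse_args_into_dataclasses(["--models", "bert-base-cased"])
    print(benchmark_args.model_names)  # -> ['bert-base-cased']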
| 296 | 1 |
def decimal_to_fraction(decimal ) -> tuple[int, int]:
    '''simple docstring'''
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError("""Please enter a valid number""" )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split(""".""" )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        # divisor now holds gcd(numerator, denominator); reduce the fraction
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
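    # Editor's note: the standard library reaches the same reduced fraction and
    # is a handy cross-check for the implementation above.
    from fractions import Fraction

    assert Fraction("""6.25""" ) == Fraction(25 , 4 )  # matches decimal_to_fraction('6.25')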
| 296 |
import math
import unittest
def is_prime ( number ) -> bool:
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
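    # (Why: any n can be written as 6k + r with r in 0..5; r in {0, 2, 3, 4} gives a
    #  factor of 2 or 3, so every prime > 3 must have r in {1, 5}.)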
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Tuple:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) ,"""Zero doesn't have any positive factors, primes must have exactly two.""" ,)
self.assertFalse(
is_prime(1 ) ,"""One only has 1 positive factor, primes must have exactly two.""" ,)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 296 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[Any] = (DEISMultistepScheduler,)
__snake_case : List[Any] = (("num_inference_steps", 25),)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,**lowerCamelCase__ : List[str] ) -> Dict:
'''simple docstring'''
        config = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**lowerCamelCase__ )
return config
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,time_step : Optional[Any]=0 ,**lowerCamelCase__ : str ) -> Optional[Any]:
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" ,None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**lowerCamelCase__ )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step ,time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual ,t ,output ,**kwargs ).prev_sample
                new_output = new_scheduler.step(residual ,t ,new_output ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
    def SCREAMING_SNAKE_CASE__ ( self : Dict ,time_step : List[Any]=0 ,**lowerCamelCase__ : Union[str, Any] ) -> Any:
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" ,None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
            new_output = new_scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ,scheduler : Dict=None ,**lowerCamelCase__ : Optional[Any] ) -> Any:
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**lowerCamelCase__ )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**lowerCamelCase__ )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ).prev_sample
return sample
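    # (full_loop above is a standard denoising loop: at every timestep the dummy
    #  model predicts a residual and scheduler.step maps it back to the previous sample.)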
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" ,None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler ,"""set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler ,"""set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]
            output_a = scheduler.step(residual ,time_step_a ,sample ,**kwargs ).prev_sample
            output_b = scheduler.step(residual ,time_step_b ,sample ,**kwargs ).prev_sample
            self.assertEqual(output_a.shape ,sample.shape )
            self.assertEqual(output_a.shape ,output_b.shape )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
'''simple docstring'''
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
'''simple docstring'''
        self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,algorithm_type="""deis""" ,solver_order=order ,solver_type=solver_type ,)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,algorithm_type=algorithm_type ,)
                        sample = self.full_loop(
                            solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,algorithm_type=algorithm_type ,)
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
'''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps ,time_step=0 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
'''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True ,dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ).prev_sample
        assert sample.dtype == torch.float16
| 296 |
import random
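# Toy one-time-pad-style cipher: each character is encrypted as (plain + key) * key
# with a fresh random key per character, so decryption recovers plain = (cipher - key**2) / key.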
class Onepad :
    '''simple docstring'''
    @staticmethod
    def encrypt ( text : str ) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 ,300 )
            encrypted = (i + k) * k
            cipher.append(encrypted )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt ( cipher : list[int] ,key : list[int] ) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(cipher ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
| 296 | 1 |
def solution ( power = 10_00 ) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
return sum_of_num
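# (Equivalent one-liner sketch: sum(int(digit) for digit in str(2**power)))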
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = int(input("""Enter the power of 2: """).strip())
print("""2 ^ """, power, """ = """, 2**power)
SCREAMING_SNAKE_CASE_ = solution(power)
print("""Sum of the digits is: """, result)
| 296 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = "efficientformer"
def __init__( self : Optional[int] ,lowerCamelCase__ : List[int] = [3, 2, 6, 4] ,lowerCamelCase__ : List[int] = [48, 96, 224, 448] ,lowerCamelCase__ : List[bool] = [True, True, True, True] ,lowerCamelCase__ : int = 448 ,lowerCamelCase__ : int = 32 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : int = 7 ,lowerCamelCase__ : int = 5 ,lowerCamelCase__ : int = 8 ,lowerCamelCase__ : int = 4 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 16 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : float = 0.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : float = 1e-5 ,lowerCamelCase__ : str = "gelu" ,lowerCamelCase__ : float = 0.02 ,lowerCamelCase__ : float = 1e-1_2 ,lowerCamelCase__ : int = 224 ,lowerCamelCase__ : float = 1e-0_5 ,**lowerCamelCase__ : str ,) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_expansion_ratio
SCREAMING_SNAKE_CASE = downsamples
SCREAMING_SNAKE_CASE = dim
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = resolution
SCREAMING_SNAKE_CASE = pool_size
SCREAMING_SNAKE_CASE = downsample_patch_size
SCREAMING_SNAKE_CASE = downsample_stride
SCREAMING_SNAKE_CASE = downsample_pad
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = num_metaad_blocks
SCREAMING_SNAKE_CASE = distillation
SCREAMING_SNAKE_CASE = use_layer_scale
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = batch_norm_eps
| 296 | 1 |
from functools import lru_cache
@lru_cache
def factorial ( num ) -> int:
    '''simple docstring'''
    if num < 0:
        raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
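# (Bare @lru_cache without parentheses requires Python >= 3.8; it memoises every
#  computed value, so repeated factorial calls are O(1) after the first.)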
if __name__ == "__main__":
import doctest
doctest.testmod()
| 296 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
SCREAMING_SNAKE_CASE_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri ( dataset_path ) -> str:
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
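# (e.g. extract_path_from_uri("s3://my-bucket/datasets/train") -> "my-bucket/datasets/train";
#  a plain local path is returned unchanged.)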
def is_remote_filesystem ( fs ) -> bool:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename ( fs , src , dst ) -> None:
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock ( ) -> None:
'''simple docstring'''
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 296 | 1 |
from copy import deepcopy
class UpperCamelCase__ :
'''simple docstring'''
    def __init__( self : Tuple ,arr : list[int] | None = None ,size : int | None = None ) -> None:
        '''simple docstring'''
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr )
        else:
            raise ValueError("""Either arr or size must be specified""" )
    def init ( self : int ,arr : list[int] ) -> None:
        '''simple docstring'''
        self.size = len(arr )
        self.tree = deepcopy(arr )
        for i in range(1 ,self.size ):
            j = self.next_(i )
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array ( self : Dict ) -> list[int]:
        '''simple docstring'''
        arr = self.tree[:]
        for i in range(self.size - 1 ,0 ,-1 ):
            j = self.next_(i )
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_ ( index : int ) -> int:
        '''simple docstring'''
        return index + (index & (-index))
    @staticmethod
    def prev ( index : int ) -> int:
        '''simple docstring'''
        return index - (index & (-index))
    def add ( self : Union[str, Any] ,index : int ,value : int ) -> None:
        '''simple docstring'''
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index )
    def update ( self : Optional[int] ,index : int ,value : int ) -> None:
        '''simple docstring'''
        self.add(index ,value - self.get(index ) )
    def prefix ( self : Optional[int] ,right : int ) -> int:
        '''simple docstring'''
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right )
        return result
    def query ( self : int ,left : int ,right : int ) -> int:
        '''simple docstring'''
        return self.prefix(right ) - self.prefix(left )
    def get ( self : Optional[int] ,index : int ) -> int:
        '''simple docstring'''
        return self.query(index ,index + 1 )
    def rank_query ( self : List[Any] ,value : int ) -> int:
        '''simple docstring'''
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
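# Usage sketch (illustrative values only): point updates and prefix sums in O(log n).
#   tree = UpperCamelCase__(arr=[1, 2, 3, 4, 5])
#   tree.add(0, 10)    # point update: the array is conceptually [11, 2, 3, 4, 5]
#   tree.prefix(3)     # -> 16  (11 + 2 + 3)
#   tree.query(1, 4)   # -> 9   (2 + 3 + 4)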
if __name__ == "__main__":
import doctest
doctest.testmod()
| 296 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
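        # (The second generation above exercises the tuple-style return path; both
        #  output paths must match the expected slice within 1e-2.)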
@slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256"""
SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 296 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Optional[Any] = StableDiffusionControlNetImgaImgPipeline
__snake_case : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__snake_case : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__snake_case : str = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
__snake_case : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : str=0 ) -> Any:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=lowerCamelCase__ ,device=torch.device(lowerCamelCase__ ) ,)
SCREAMING_SNAKE_CASE = floats_tensor(control_image.shape ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Optional[Any] = StableDiffusionControlNetImgaImgPipeline
__snake_case : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__snake_case : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__snake_case : Dict = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
        def init_weights(m : torch.nn.Module ):
            if isinstance(m ,torch.nn.Convad ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(lowerCamelCase__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(lowerCamelCase__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE = MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any]=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=lowerCamelCase__ ,device=torch.device(lowerCamelCase__ ) ,),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=lowerCamelCase__ ,device=torch.device(lowerCamelCase__ ) ,),
]
SCREAMING_SNAKE_CASE = floats_tensor(control_image[0].shape ,rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert("""RGB""" ).resize((64, 64) )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""guidance_scale"""] = scale
        output_a = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""guidance_scale"""] = scale
        output_b = pipe(**inputs ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""guidance_scale"""] = scale
        output_c = pipe(**inputs ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""guidance_scale"""] = scale
        output_d = pipe(**inputs ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_b ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_c ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_d ) ) > 1e-3
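        # control_guidance_start / control_guidance_end restrict the window of
        # denoising steps in which ControlNet conditioning is applied, so each
        # window above should yield a visibly different image.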
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowerCamelCase__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
SCREAMING_SNAKE_CASE = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" ,safety_checker=lowerCamelCase__ ,controlnet=lowerCamelCase__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE = """evil space-punk bird"""
SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
SCREAMING_SNAKE_CASE = pipe(
lowerCamelCase__ ,lowerCamelCase__ ,control_image=lowerCamelCase__ ,generator=lowerCamelCase__ ,output_type="""np""" ,num_inference_steps=50 ,strength=0.6 ,)
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9e-2
| 296 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,lowerCamelCase__ : Callable ,lowerCamelCase__ : Optional[Features] = None ,lowerCamelCase__ : str = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[dict] = None ,lowerCamelCase__ : Optional[int] = None ,**lowerCamelCase__ : Optional[Any] ,) -> List[str]:
'''simple docstring'''
super().__init__(
features=lowerCamelCase__ ,cache_dir=lowerCamelCase__ ,keep_in_memory=lowerCamelCase__ ,streaming=lowerCamelCase__ ,num_proc=lowerCamelCase__ ,**lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = Generator(
cache_dir=lowerCamelCase__ ,features=lowerCamelCase__ ,generator=lowerCamelCase__ ,gen_kwargs=lowerCamelCase__ ,**lowerCamelCase__ ,)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ ,download_mode=lowerCamelCase__ ,verification_mode=lowerCamelCase__ ,base_path=lowerCamelCase__ ,num_proc=self.num_proc ,)
SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split="""train""" ,verification_mode=lowerCamelCase__ ,in_memory=self.keep_in_memory )
return dataset
| 296 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Dict ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : str ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Tuple ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Any ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : str ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : str ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Any ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Tuple ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : int ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : int ,**lowerCamelCase__ : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : int ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : int ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : str ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
| 296 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "whether to use adafactor"} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
__snake_case : Optional[float] = field(
default=lowerCAmelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
__snake_case : Optional[str] = field(
default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 296 | 1 |
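# Classical kinetic energy: E_k = 0.5 * m * v**2; abs(velocity) is taken so the
# result is independent of the direction of motion.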
def kinetic_energy ( mass , velocity ) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""" )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 296 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : List[str] = TextToVideoSDPipeline
__snake_case : int = TEXT_TO_IMAGE_PARAMS
__snake_case : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__snake_case : Dict = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=0 ) -> List[Any]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """np"""
SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase__ ).frames
SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
SCREAMING_SNAKE_CASE = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=25 ,output_type="""pt""" ).frames
SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
SCREAMING_SNAKE_CASE = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
SCREAMING_SNAKE_CASE = pipe.to("""cuda""" )
SCREAMING_SNAKE_CASE = """Spiderman is surfing"""
SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(lowerCamelCase__ ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""pt""" ).frames
SCREAMING_SNAKE_CASE = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 296 | 1 |
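# Min-max selection sort: repeatedly move the smallest remaining element to a
# front list and the largest to a back list, then stitch front + remainder + back.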
def merge_sort ( collection ) -> list:
    '''simple docstring'''
    start, end = [], []
    while len(collection ) > 1:
        min_one, max_one = min(collection ), max(collection )
        start.append(min_one )
        end.append(max_one )
        collection.remove(min_one )
        collection.remove(max_one )
end.reverse()
return start + collection + end
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
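    # Added sanity check (not in the original file): despite its name, the function
    # above is a min/max selection sort rather than a classic merge sort, but it
    # still returns the elements in ascending order.
    assert merge_sort([5, 2, 9, 1]) == [1, 2, 5, 9]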
| 296 |
def solution(n: int = 10) -> str:
    '''simple docstring'''
    if not isinstance(n, int) or n < 0:
        raise ValueError("""Invalid input""")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(F'''{solution(10) = }''')
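    # Added check (not in the original file): three-argument pow() performs the
    # exponentiation modulo 10**n directly, so the 2.3-million-digit power of two
    # is never materialised.
    assert solution(10) == str((28433 * pow(2, 7830457, 10**10) + 1) % 10**10)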
| 296 | 1 |
def print_max_activities(start, finish) -> None:
    '''simple docstring'''
    n = len(finish)
    print("""The following activities are selected:""")
    # The first activity is always selected
    i = 0
    print(i, end=""",""")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=""",""")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
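    # Added variant (not in the original file): the same greedy selection over
    # activities sorted by finish time, returning indices instead of printing.
    def max_activities(start, finish):
        selected = [0]  # the first activity is always selected
        i = 0
        for j in range(1, len(finish)):
            if start[j] >= finish[i]:
                selected.append(j)
                i = j
        return selected

    assert max_activities(start, finish) == [0, 1, 3, 4]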
| 296 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_llama"""] = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_llama_fast"""] = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_llama"""] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
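# Added sketch (not part of the original module): the lazy pattern above defers
# heavy imports until an attribute is first touched. A minimal standalone
# illustration of the same idea (hypothetical _LazyNamespace, not transformers' API):
class _LazyNamespace:
    def __init__(self, loaders):
        self._loaders = loaders  # maps attribute name -> zero-argument import callable
        self._cache = {}

    def __getattr__(self, attr):
        if attr not in self._loaders:
            raise AttributeError(attr)
        if attr not in self._cache:
            self._cache[attr] = self._loaders[attr]()  # the real import happens here, once
        return self._cache[attr]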
| 296 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    '''simple docstring'''
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(name):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        name = name.replace("""patch_embed""" , """patch_embeddings""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        name = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        name = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        name = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        name = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        name = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        name = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        name = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    return name
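# Added illustration (not part of the original script): the "tricky" refinenet rule
# above inverts the layer index, mapping checkpoint refinenet4..1 onto
# fusion_stage.layers.0..3. A quick check of that arithmetic:
assert {layer_idx: abs(layer_idx - 4) for layer_idx in range(1, 5)} == {1: 3, 2: 2, 3: 1, 4: 0}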
def read_in_q_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
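# Added sketch (not part of the original script): the slicing above splits a fused
# qkv projection of shape (3 * hidden_size, hidden_size) into three equal
# query/key/value blocks. Tiny self-check with hidden_size = 2:
_w = torch.arange(6 * 2).reshape(6, 2)
assert _w[:2].shape == _w[2:4].shape == _w[-2:].shape == (2, 2)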
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    '''simple docstring'''
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if """ade""" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="""pt""" )
    # forward pass
    outputs = model(**encoding ).logits if """ade""" in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model to hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 296 |
from pathlib import Path
import fire
def minify(src_dir, dest_dir, n):
    '''simple docstring'''
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open("""w""" ).write("""\n""".join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
| 296 | 1 |
from collections import deque
def tarjan(g):
    '''simple docstring'''
    n = len(g )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index

    components = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components


def create_graph(n, edges):
    '''simple docstring'''
    g = [[] for _ in range(n )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
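    # Added usage sketch (not in the original file): a two-node cycle plus an
    # isolated vertex collapses into exactly two strongly connected components.
    assert tarjan(create_graph(3, [(0, 1), (1, 0)])) == [[1, 0], [2]]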
| 296 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
        config = AutoConfig.from_pretrained("""bert-base-uncased""" )
        self.assertIsInstance(config ,BertConfig )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG )
        self.assertIsInstance(config ,RobertaConfig )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(config ,RobertaConfig )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
        config = AutoConfig.for_model("""roberta""" )
        self.assertIsInstance(config ,RobertaConfig )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir ,"""fake-roberta""" )
            os.makedirs(folder ,exist_ok=True )
            with open(os.path.join(folder ,"""config.json""" ) ,"""w""" ) as f:
                f.write(json.dumps({} ) )
            config = AutoConfig.from_pretrained(folder )
            self.assertEqual(type(config ) ,RobertaConfig )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
try:
            AutoConfig.register("""custom""" ,CustomConfig )
# Wrong model type will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("""model""" ,CustomConfig )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("""bert""" ,BertConfig )
# Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir )
                new_config = AutoConfig.from_pretrained(tmp_dir )
                self.assertIsInstance(new_config ,CustomConfig )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""bert-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""bert-base""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,"""hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" ,):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=False )
        config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=True )
self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
# Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir ,trust_remote_code=True )
self.assertEqual(reloaded_config.__class__.__name__ ,"""NewModelConfig""" )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
        class NewModelConfigLocal(NewModelConfig ):
            '''simple docstring'''
            model_type = """new-model"""
        try:
            AutoConfig.register("""new-model""" ,NewModelConfigLocal )
# If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
            self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=False )
            self.assertEqual(config.__class__.__name__ ,"""NewModelConfigLocal""" )
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ,trust_remote_code=True )
            self.assertEqual(config.__class__.__name__ ,"""NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 296 | 1 |
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )]
# Reverse whole list
SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE_ = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 296 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """git_vision_model"""
def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
class GitConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """git"""
def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = num_image_with_embedding
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 296 | 1 |
def decimal_to_binary(num: int) -> str:
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError("""'float' object cannot be interpreted as an integer""")
    if isinstance(num, str):
        raise TypeError("""'str' object cannot be interpreted as an integer""")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
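    # Added checks (not in the original file): the output matches Python's
    # built-in bin() for both positive and negative inputs.
    assert decimal_to_binary(10) == bin(10) == "0b1010"
    assert decimal_to_binary(-5) == bin(-5) == "-0b101"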
| 296 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self ,parent ,batch_size=7 ,min_seq_length=400 ,max_seq_length=2000 ,spectrogram_length=2048 ,feature_size=128 ,num_audio_channels=1 ,hop_length=512 ,chunk_length=30 ,sampling_rate=44100 ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self ,equal_length=False ,numpify=False ):
        '''simple docstring'''
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
'''simple docstring'''
    feature_extraction_class = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
'''simple docstring'''
        self.feat_extract_tester = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
'''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor ,"""spectrogram_length""" ) )
        self.assertTrue(hasattr(feature_extractor ,"""feature_size""" ) )
        self.assertTrue(hasattr(feature_extractor ,"""num_audio_channels""" ) )
        self.assertTrue(hasattr(feature_extractor ,"""hop_length""" ) )
        self.assertTrue(hasattr(feature_extractor ,"""chunk_length""" ) )
        self.assertTrue(hasattr(feature_extractor ,"""sampling_rate""" ) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("""mel_filters""" )
        mel_2 = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_1 ,mel_2 ) )
        self.assertEqual(dict_first ,dict_second )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname ,"""feat_extract.json""" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("""mel_filters""" )
        mel_2 = dict_second.pop("""mel_filters""" )
        self.assertTrue(np.allclose(mel_1 ,mel_2 ) )
        self.assertEqual(dict_first ,dict_second )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
        encoded_audios = feature_extractor(np_speech_inputs ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=True ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples( self ,num_samples ):
        '''simple docstring'''
        ds = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("""id""" ).select(range(num_samples ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech ,return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape ,(1, 1, 192, 128) )
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,expected_slice ,atol=1e-4 ) )
| 296 | 1 |
def get_highest_set_bit_position(number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
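    # Added check (not in the original file): for positive integers the result
    # coincides with int.bit_length().
    assert get_highest_set_bit_position(19) == (19).bit_length() == 5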
| 296 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    '''simple docstring'''
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("""deeplabv3_""" ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = """pascal-voc-id2label.json"""
    else:
        config.num_labels = 1000
        filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name , base_model=False ):
    '''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
SCREAMING_SNAKE_CASE = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
SCREAMING_SNAKE_CASE = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
SCREAMING_SNAKE_CASE = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
SCREAMING_SNAKE_CASE = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
SCREAMING_SNAKE_CASE = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
SCREAMING_SNAKE_CASE = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
SCREAMING_SNAKE_CASE = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
SCREAMING_SNAKE_CASE = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
SCREAMING_SNAKE_CASE = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
SCREAMING_SNAKE_CASE = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
SCREAMING_SNAKE_CASE = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
SCREAMING_SNAKE_CASE = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
SCREAMING_SNAKE_CASE = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
SCREAMING_SNAKE_CASE = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
SCREAMING_SNAKE_CASE = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
SCREAMING_SNAKE_CASE = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
SCREAMING_SNAKE_CASE = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
SCREAMING_SNAKE_CASE = """mobilevit.""" + name
return name
def convert_state_dict(orig_state_dict , model , base_model=False ):
    '''simple docstring'''
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevit."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[:dim]
                orig_state_dict[prefix + """key.bias"""] = val[dim : dim * 2]
                orig_state_dict[prefix + """value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(checkpoint , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
        model_mapping = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 296 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
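# Added illustration (not part of the original script): EfficientNet's compound
# scaling multiplies base channel/depth counts by the per-variant coefficients
# above. A rough sketch of the width side (assumed simple rounding; the real
# model additionally snaps channel counts to multiples of 8):
def _scaled_channels(base_channels, width_coef):
    return int(round(base_channels * width_coef))

assert _scaled_channels(32, CONFIG_MAP["b7"]["width_coef"]) == 64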
def get_efficientnet_config(model_name ):
    '''simple docstring'''
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["""hidden_dim"""]
    config.width_coefficient = CONFIG_MAP[model_name]["""width_coef"""]
    config.depth_coefficient = CONFIG_MAP[model_name]["""depth_coef"""]
    config.image_size = CONFIG_MAP[model_name]["""image_size"""]
    config.dropout_rate = CONFIG_MAP[model_name]["""dropout_rate"""]
    config.depthwise_padding = CONFIG_MAP[model_name]["""dw_padding"""]
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    '''simple docstring'''
    size = CONFIG_MAP[model_name]["""image_size"""]
    preprocessor = EfficientNetImageProcessor(
        size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ):
    '''simple docstring'''
    block_names = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
SCREAMING_SNAKE_CASE = {}
for item in rename_keys:
if item[0] in original_param_names:
SCREAMING_SNAKE_CASE = """efficientnet.""" + item[1]
SCREAMING_SNAKE_CASE = """classifier.weight"""
SCREAMING_SNAKE_CASE = """classifier.bias"""
return key_mapping
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
SCREAMING_SNAKE_CASE = key_mapping[key]
if "_conv" in key and "kernel" in key:
SCREAMING_SNAKE_CASE = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
SCREAMING_SNAKE_CASE = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
SCREAMING_SNAKE_CASE = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
SCREAMING_SNAKE_CASE = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE , weights="""imagenet""" , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=10_00 , classifier_activation="""softmax""" , )
SCREAMING_SNAKE_CASE = original_model.trainable_variables
SCREAMING_SNAKE_CASE = original_model.non_trainable_variables
SCREAMING_SNAKE_CASE = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
SCREAMING_SNAKE_CASE = param.numpy()
SCREAMING_SNAKE_CASE = list(tf_params.keys() )
# Load HuggingFace model
SCREAMING_SNAKE_CASE = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
SCREAMING_SNAKE_CASE = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
SCREAMING_SNAKE_CASE = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
SCREAMING_SNAKE_CASE = convert_image_processor(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = hf_model(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = outputs.logits.detach().numpy()
# Original model inference
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""image_size"""]
SCREAMING_SNAKE_CASE = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
SCREAMING_SNAKE_CASE = image.img_to_array(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
SCREAMING_SNAKE_CASE = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
SCREAMING_SNAKE_CASE = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 296 |
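# The conversion above hinges on reordering TF kernel axes into PyTorch's
# layout. A minimal, hypothetical sketch of that permutation (shapes are
# illustrative): TF stores conv kernels as (H, W, C_in, C_out), PyTorch
# expects (C_out, C_in, H, W); TF depthwise kernels are (H, W, C, multiplier).
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)  # H, W, C_in, C_out
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # C_out, C_in, H, W
assert pt_kernel.shape == (32, 16, 3, 3)
tf_dw_kernel = np.zeros((3, 3, 16, 1), dtype=np.float32)  # H, W, C, multiplier
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)  # C, 1, H, W
assert pt_dw_kernel.shape == (16, 1, 3, 3)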
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Image Classification mapping
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
SCREAMING_SNAKE_CASE_ = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
SCREAMING_SNAKE_CASE_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[str] = FLAX_MODEL_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModel)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Tuple = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class UpperCamelCase__ ( _BaseAutoModelClass ):
'''simple docstring'''
__snake_case : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
| 296 | 1 |
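# A minimal sketch (not the real _LazyAutoMapping) of the idea behind the
# mappings above: resolve a model-type string to a class name eagerly, and
# defer the actual import/construction until the class is requested. The
# mapping contents and the resolve() helper are illustrative only.
DUMMY_MAPPING = {"bert": "FlaxBertModel", "gpt2": "FlaxGPT2Model"}

def resolve(model_type: str) -> str:
    if model_type not in DUMMY_MAPPING:
        raise KeyError(f"Unrecognized model type: {model_type}")
    return DUMMY_MAPPING[model_type]

assert resolve("gpt2") == "FlaxGPT2Model"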
def equation(x: float) -> float:
    '''simple docstring'''
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    '''simple docstring'''
    # A root can only be bracketed if equation(a) and equation(b) have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("""Wrong space!""")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 296 |
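# A hedged variant of the bisection routine above with an explicit tolerance
# and an iteration cap instead of the hard-coded 0.01; f, tol, and max_iter
# are illustrative names, not part of the original code.
def bisect(f, a: float, b: float, tol: float = 1e-6, max_iter: int = 100) -> float:
    if f(a) * f(b) >= 0:
        raise ValueError("f(a) and f(b) must have opposite signs")
    for _ in range(max_iter):
        c = (a + b) / 2
        if f(c) == 0.0 or (b - a) / 2 < tol:
            return c
        if f(c) * f(a) < 0:
            b = c
        else:
            a = c
    return (a + b) / 2

print(bisect(lambda x: 10 - x * x, 0, 6))  # root is sqrt(10) ≈ 3.16228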
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = "table-transformer"
__snake_case : Union[str, Any] = ["past_key_values"]
__snake_case : List[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" )
SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None
SCREAMING_SNAKE_CASE = use_timm_backbone
SCREAMING_SNAKE_CASE = backbone_config
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_queries
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = encoder_ffn_dim
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = encoder_attention_heads
SCREAMING_SNAKE_CASE = decoder_ffn_dim
SCREAMING_SNAKE_CASE = decoder_layers
SCREAMING_SNAKE_CASE = decoder_attention_heads
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = init_std
SCREAMING_SNAKE_CASE = init_xavier_std
SCREAMING_SNAKE_CASE = encoder_layerdrop
SCREAMING_SNAKE_CASE = decoder_layerdrop
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = auxiliary_loss
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = backbone
SCREAMING_SNAKE_CASE = use_pretrained_backbone
SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE = class_cost
SCREAMING_SNAKE_CASE = bbox_cost
SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE = mask_loss_coefficient
SCREAMING_SNAKE_CASE = dice_loss_coefficient
SCREAMING_SNAKE_CASE = bbox_loss_coefficient
SCREAMING_SNAKE_CASE = giou_loss_coefficient
SCREAMING_SNAKE_CASE = eos_coefficient
super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.d_model
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> float:
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
'''simple docstring'''
return 12
| 296 | 1 |
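# A minimal sketch of how a dynamic-axes mapping like the one the ONNX config
# above returns is typically consumed by torch.onnx.export; the toy model and
# file name are illustrative, not the actual table-transformer export path.
import torch

model = torch.nn.Conv2d(3, 8, kernel_size=3)
dummy = torch.randn(1, 3, 64, 64)
torch.onnx.export(
    model,
    dummy,
    "toy_model.onnx",
    input_names=["pixel_values"],
    output_names=["features"],
    dynamic_axes={"pixel_values": {0: "batch", 2: "height", 3: "width"}},
)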
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def test_primes(self) -> None:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def test_not_primes(self) -> None:
'''simple docstring'''
with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) ,"""Zero doesn't have any positive factors, primes must have exactly two.""" ,)
self.assertFalse(
is_prime(1 ) ,"""One only has 1 positive factor, primes must have exactly two.""" ,)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 296 |
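# A quick cross-check of the 6k +/- 1 optimization in is_prime above against
# plain trial division; naive_is_prime is illustrative and assumes the
# is_prime definition from the file above is in scope.
import math

def naive_is_prime(n: int) -> bool:
    return n >= 2 and all(n % d for d in range(2, math.isqrt(n) + 1))

assert all(is_prime(n) == naive_is_prime(n) for n in range(1000))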
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_50_00_00) -> int:
    '''simple docstring'''
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 296 | 1 |
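# The counting loop above is an instance of Euclid's formula: for coprime
# m > n of opposite parity, (m^2 - n^2, 2mn, m^2 + n^2) is a primitive
# Pythagorean triple with perimeter 2m(m + n). A tiny illustrative generator:
from math import gcd

def primitive_triples(limit: int):
    m = 2
    while 2 * m * (m + 1) <= limit:
        for n in range((m % 2) + 1, m, 2):  # n < m with opposite parity
            if gcd(m, n) == 1:
                yield m * m - n * n, 2 * m * n, m * m + n * n
        m += 1

assert (3, 4, 5) in list(primitive_triples(50))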
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=2 ,generator=lowerCamelCase__ ,output_type="""numpy""" ,return_dict=lowerCamelCase__ )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """google/ncsnpp-celebahq-256"""
SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = KarrasVeScheduler()
SCREAMING_SNAKE_CASE = KarrasVePipeline(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=20 ,generator=lowerCamelCase__ ,output_type="""numpy""" ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 296 |
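# The pipeline tests above depend on seeded generators for reproducibility.
# A minimal sketch of that pattern with arbitrary tensor shapes:
import torch

x = torch.randn(2, 3, generator=torch.manual_seed(0))
y = torch.randn(2, 3, generator=torch.manual_seed(0))
assert torch.equal(x, y)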
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 296 | 1 |
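# The subcommand pattern used by the CLI above, in miniature: each command
# registers a subparser and binds a callable via set_defaults. The command
# name and handler here are illustrative.
from argparse import ArgumentParser

parser = ArgumentParser("toy-cli", usage="toy-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="toy-cli command helpers")
env_parser = subparsers.add_parser("env", help="Print environment info")
env_parser.set_defaults(func=lambda args: print("environment info"))
args = parser.parse_args(["env"])
if not hasattr(args, "func"):
    parser.print_help()
else:
    args.func(args)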
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ = random.Random()
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
if rng is None:
SCREAMING_SNAKE_CASE = global_rng
SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int]=7 ,lowerCamelCase__ : Optional[Any]=400 ,lowerCamelCase__ : List[str]=2000 ,lowerCamelCase__ : List[str]=2048 ,lowerCamelCase__ : Any=128 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : str=512 ,lowerCamelCase__ : Optional[Any]=30 ,lowerCamelCase__ : Tuple=44100 ,) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = min_seq_length
SCREAMING_SNAKE_CASE = max_seq_length
SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE = spectrogram_length
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = num_audio_channels
SCREAMING_SNAKE_CASE = hop_length
SCREAMING_SNAKE_CASE = chunk_length
SCREAMING_SNAKE_CASE = sampling_rate
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=False ) -> str:
'''simple docstring'''
def _flatten(lowerCamelCase__ : List[Any] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : List[Any] = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase__ ,"""spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""sampling_rate""" ) )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE = feature_extractor(
lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=lowerCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase__ ,return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape ,(1, 1, 192, 128) )
SCREAMING_SNAKE_CASE = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase__ ,atol=1e-4 ) )
| 296 |
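# The save/load round-trip asserted in the tests above, in miniature:
# serialize a config dict to JSON, reload it, and compare. Purely
# illustrative; the keys are made up.
import json
import os
import tempfile

config = {"feature_size": 128, "hop_length": 512, "sampling_rate": 44100}
with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "feat_extract.json")
    with open(path, "w") as f:
        json.dump(config, f)
    with open(path) as f:
        assert json.load(f) == config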
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = SwinConfig()
SCREAMING_SNAKE_CASE = swin_name.split("""_""" )
SCREAMING_SNAKE_CASE = name_split[1]
SCREAMING_SNAKE_CASE = int(name_split[4] )
SCREAMING_SNAKE_CASE = int(name_split[3][-1] )
if model_size == "tiny":
SCREAMING_SNAKE_CASE = 96
SCREAMING_SNAKE_CASE = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE = (3, 6, 12, 24)
elif model_size == "small":
SCREAMING_SNAKE_CASE = 96
SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE = (3, 6, 12, 24)
elif model_size == "base":
SCREAMING_SNAKE_CASE = 1_28
SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE = (4, 8, 16, 32)
else:
SCREAMING_SNAKE_CASE = 1_92
SCREAMING_SNAKE_CASE = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE = (6, 12, 24, 48)
if "in22k" in swin_name:
SCREAMING_SNAKE_CASE = 2_18_41
else:
SCREAMING_SNAKE_CASE = 10_00
SCREAMING_SNAKE_CASE = """huggingface/label-files"""
SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = img_size
SCREAMING_SNAKE_CASE = num_classes
SCREAMING_SNAKE_CASE = embed_dim
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = window_size
return config
def __lowercase ( _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
SCREAMING_SNAKE_CASE = """encoder.""" + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
SCREAMING_SNAKE_CASE = """layernorm.weight"""
if name == "norm.bias":
SCREAMING_SNAKE_CASE = """layernorm.bias"""
if "head" in name:
SCREAMING_SNAKE_CASE = name.replace("""head""" , """classifier""" )
else:
SCREAMING_SNAKE_CASE = """swin.""" + name
return name
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "mask" in key:
continue
elif "qkv" in key:
SCREAMING_SNAKE_CASE = key.split(""".""" )
SCREAMING_SNAKE_CASE = int(key_split[1] )
SCREAMING_SNAKE_CASE = int(key_split[3] )
SCREAMING_SNAKE_CASE = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[
:dim
]
SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE = val[
-dim:
]
else:
SCREAMING_SNAKE_CASE = val
return orig_state_dict
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
SCREAMING_SNAKE_CASE = get_swin_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = SwinForImageClassification(_SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE = convert_state_dict(timm_model.state_dict() , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
SCREAMING_SNAKE_CASE = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
SCREAMING_SNAKE_CASE = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE = timm_model(inputs["""pixel_values"""] )
SCREAMING_SNAKE_CASE = model(**_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 296 | 1 |
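# The qkv handling in convert_state_dict above splits one fused attention
# projection into query, key, and value chunks. A small illustrative sketch
# of that slicing (dim is arbitrary here):
import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)  # fused (3*dim, dim) projection
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)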
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""google/mt5-small""" )
SCREAMING_SNAKE_CASE = tokenizer("""Hello there""" ,return_tensors="""np""" ).input_ids
SCREAMING_SNAKE_CASE = tokenizer("""Hi I am""" ,return_tensors="""np""" ).input_ids
SCREAMING_SNAKE_CASE = shift_tokens_right(lowerCamelCase__ ,model.config.pad_token_id ,model.config.decoder_start_token_id )
SCREAMING_SNAKE_CASE = model(lowerCamelCase__ ,decoder_input_ids=lowerCamelCase__ ).logits
SCREAMING_SNAKE_CASE = optax.softmax_cross_entropy(lowerCamelCase__ ,onehot(lowerCamelCase__ ,logits.shape[-1] ) ).mean()
SCREAMING_SNAKE_CASE = -(labels.shape[-1] * loss.item())
SCREAMING_SNAKE_CASE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 296 |
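# The loss computation in the test above, in miniature: optax softmax
# cross-entropy over one-hot targets, averaged. Shapes are illustrative.
import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

logits = jnp.zeros((1, 3, 5))  # (batch, seq, vocab); uniform logits
labels = jnp.array([[1, 2, 3]])  # (batch, seq)
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
assert abs(float(loss) - float(jnp.log(5.0))) < 1e-5  # uniform logits give log(vocab)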
import os
from distutils.util import strtobool
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
for e in env_keys:
SCREAMING_SNAKE_CASE = int(os.environ.get(_SCREAMING_SNAKE_CASE , -1 ) )
if val >= 0:
return val
return default
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = os.environ.get(_SCREAMING_SNAKE_CASE , str(_SCREAMING_SNAKE_CASE ) )
return strtobool(_SCREAMING_SNAKE_CASE ) == 1 # As its name indicates `strtobool` actually returns an int...
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="no" ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = os.environ.get(_SCREAMING_SNAKE_CASE , str(_SCREAMING_SNAKE_CASE ) )
return value
| 296 | 1 |
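# A self-contained restatement of the boolean helper above: read an
# environment variable, fall back to a default, and coerce "1"/"yes"/"true"
# style strings with strtobool. The helper name is illustrative.
import os
from distutils.util import strtobool

def flag_from_env(key: str, default: bool = False) -> bool:
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # strtobool returns 0/1, not a bool

os.environ["TOY_DEBUG"] = "yes"
assert flag_from_env("TOY_DEBUG") is True
assert flag_from_env("TOY_MISSING") is False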
def solution(n: int = 1_00) -> int:
    '''simple docstring'''
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 296 |
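# The brute-force loop above has closed forms: sum(i) = n(n+1)/2 and
# sum(i^2) = n(n+1)(2n+1)/6. A quick cross-check against known values:
def closed_form(n: int) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

assert closed_form(10) == 2640
assert closed_form(100) == 25164150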
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def __lowercase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class UpperCamelCase__ :
'''simple docstring'''
__snake_case : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
__snake_case : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
__snake_case : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Use FP16 to accelerate inference."} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Benchmark training of model"} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Verbose memory tracing"} )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Trace memory line by line"} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save result to a CSV file"} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Save all print statements in a log file"} )
__snake_case : bool = field(default=lowerCAmelCase_ , metadata={"help": "Whether to print environment information"} )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
__snake_case : str = field(
default=F"inference_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv."} , )
__snake_case : str = field(
default=F"inference_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
__snake_case : str = field(
default=F"train_time_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
__snake_case : str = field(
default=F"train_memory_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
__snake_case : str = field(
default=F"env_info_{round(time() )}.csv" , metadata={"help": "CSV filename used if saving environment information."} , )
__snake_case : str = field(
default=F"log_{round(time() )}.csv" , metadata={"help": "Log filename used if print statements are saved in log."} , )
__snake_case : int = field(default=3 , metadata={"help": "Times an experiment will be run."} )
__snake_case : bool = field(
default=lowerCAmelCase_ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" ,lowerCamelCase__ ,)
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) ,indent=2 )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 296 | 1 |
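# A small sketch of the list_field pattern used above: dataclass fields with
# mutable defaults need a default_factory, which the helper wraps. ToyArgs
# is illustrative only.
from dataclasses import dataclass, field

def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)

@dataclass
class ToyArgs:
    batch_sizes: list = list_field(default=[8], metadata={"help": "batch sizes"})

assert ToyArgs().batch_sizes == [8]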
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : Optional[int] = FunnelTokenizer
__snake_case : Union[str, Any] = FunnelTokenizerFast
__snake_case : List[Any] = True
__snake_case : Dict = True
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ,**lowerCamelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,**lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE = """unwanted, running"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCamelCase__ ,["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) ,[7, 4, 5, 10, 8, 9] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE = tokenizer("""UNwant\u00E9d,running""" )
SCREAMING_SNAKE_CASE = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] ,[2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE = tokenizer("""UNwant\u00E9d,running""" ,"""UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] ,[2] + [0] * sentence_len + [1] * sentence_len )
| 296 |
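# The "##" pieces in the vocab above come from WordPiece: greedy
# longest-match-first segmentation, where non-initial pieces carry a "##"
# prefix. A minimal illustrative tokenizer for a single word:
def wordpiece(word: str, vocab: set) -> list:
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return ["<unk>"]  # no matching piece at this position
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "want", "##ing", "runn"}
assert wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]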
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def test_primes(self) -> None:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def test_not_primes(self) -> None:
'''simple docstring'''
with self.assertRaises(AssertionError ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) ,"""Zero doesn't have any positive factors, primes must have exactly two.""" ,)
self.assertFalse(
is_prime(1 ) ,"""One only has 1 positive factor, primes must have exactly two.""" ,)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 296 | 1 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    '''simple docstring'''
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("""w""").write("""\n""".join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 296 |
import random
class Onepad:
    '''simple docstring'''
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
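Decryption works because each cipher value is c = (p + k) * k = p * k + k**2, so p = (c - k**2) / k recovers the original code point exactly; the division is always whole, which makes the int() cast a formality. A round-trip sketch (the sample text is arbitrary):

# Round-trip check: decrypt(encrypt(text)) returns text unchanged.
c, k = Onepad.encrypt("attack at dawn")
assert Onepad.decrypt(c, k) == "attack at dawn"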
| 296 | 1 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """A queue with three fixed priority levels; priority 0 is served first."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Return the first element of the highest-priority non-empty queue."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))

class ElementPriorityQueue:
    """A queue where the smallest element is dequeued first."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)

def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())  # only nine items were enqueued: this tenth dequeue raises UnderFlowError


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())  # likewise raises UnderFlowError on the tenth dequeue


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
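Both classes scan a Python list on every dequeue, which is O(n). For contrast, a minimal sketch of the same smallest-element-first behaviour on top of the standard library's heapq, which dequeues in O(log n) (an alternative, not part of the snippet above):

import heapq


class HeapPriorityQueue:
    """Smallest-element-first queue backed by a binary heap."""

    def __init__(self) -> None:
        self._heap: list[int] = []

    def enqueue(self, data: int) -> None:
        heapq.heappush(self._heap, data)

    def dequeue(self) -> int:
        if not self._heap:
            raise IndexError("The queue is empty")
        return heapq.heappop(self._heap)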
| 296 |
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
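A brief usage sketch of the configuration class defined above (the override values are arbitrary):

# Override a couple of defaults and inspect the resulting configuration.
config = EfficientFormerConfig(num_meta3d_blocks=2, drop_path_rate=0.1)
print(config.hidden_sizes)  # [48, 96, 224, 448]
print(config.num_meta3d_blocks)  # 2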
| 296 | 1 |