code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from random import randint, random
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int , snake_case__ :int , snake_case__ :int , snake_case__ :bool = False , snake_case__ :bool = False , snake_case__ :int = 5 , ) -> list:
_lowercase = [[-1] * number_of_cells] # Create a highway without any car
_lowercase = 0
_lowercase = max(snake_case__ , 0 )
while i < number_of_cells:
_lowercase = (
randint(0 , snake_case__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list , snake_case__ :int ) -> int:
_lowercase = 0
_lowercase = highway_now[car_index + 1 :]
for cell in range(len(snake_case__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(snake_case__ , -1 )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list , snake_case__ :float , snake_case__ :int ) -> list:
_lowercase = len(snake_case__ )
# Beforce calculations, the highway is empty
_lowercase = [-1] * number_of_cells
for car_index in range(snake_case__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_lowercase = min(highway_now[car_index] + 1 , snake_case__ )
# Number of empty cell before the next car
_lowercase = get_distance(snake_case__ , snake_case__ ) - 1
# We can't have the car causing an accident
_lowercase = min(next_highway[car_index] , snake_case__ )
if random() < probability:
# Randomly, a driver will slow down
_lowercase = max(next_highway[car_index] - 1 , 0 )
return next_highway
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list , snake_case__ :int , snake_case__ :float , snake_case__ :int ) -> list:
_lowercase = len(highway[0] )
for i in range(snake_case__ ):
_lowercase = update(highway[i] , snake_case__ , snake_case__ )
_lowercase = [-1] * number_of_cells
for car_index in range(snake_case__ ):
_lowercase = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_lowercase = (car_index + speed) % number_of_cells
# Commit the change of position
_lowercase = speed
highway.append(snake_case__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''dpr'''
def __init__( self : int ,__A : Union[str, Any]=3_0522 ,__A : Optional[int]=768 ,__A : int=12 ,__A : List[Any]=12 ,__A : Optional[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : List[Any]=0.1 ,__A : str=512 ,__A : List[str]=2 ,__A : Tuple=0.02 ,__A : Tuple=1e-12 ,__A : List[Any]=0 ,__A : List[str]="absolute" ,__A : int = 0 ,**__A : int ,) -> Tuple:
super().__init__(pad_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = projection_dim
_lowercase = position_embedding_type | 67 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case = logging.get_logger(__name__)
snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
snake_case = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE_ : Any = GPTaTokenizer
def __init__( self : int ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : List[str]=None ,__A : Any="<|endoftext|>" ,__A : int="<|endoftext|>" ,__A : List[str]="<|endoftext|>" ,__A : str=False ,**__A : Union[str, Any] ,) -> int:
super().__init__(
__A ,__A ,tokenizer_file=__A ,unk_token=__A ,bos_token=__A ,eos_token=__A ,add_prefix_space=__A ,**__A ,)
_lowercase = kwargs.pop('add_bos_token' ,__A )
_lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,__A ) != add_prefix_space:
_lowercase = getattr(__A ,pre_tok_state.pop('type' ) )
_lowercase = add_prefix_space
_lowercase = pre_tok_class(**__A )
_lowercase = add_prefix_space
def __UpperCAmelCase ( self : Any ,*__A : List[Any] ,**__A : Tuple ) -> BatchEncoding:
_lowercase = kwargs.get('is_split_into_words' ,__A )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A ,**__A )
def __UpperCAmelCase ( self : int ,*__A : Any ,**__A : Any ) -> BatchEncoding:
_lowercase = kwargs.get('is_split_into_words' ,__A )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A ,**__A )
def __UpperCAmelCase ( self : int ,__A : str ,__A : Optional[str] = None ) -> Tuple[str]:
_lowercase = self._tokenizer.model.save(__A ,name=__A )
return tuple(__A )
def __UpperCAmelCase ( self : List[Any] ,__A : "Conversation" ) -> List[int]:
_lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A ,add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
_lowercase = input_ids[-self.model_max_length :]
return input_ids | 67 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str] ) -> Optional[Any]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowercase = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowercase = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Dict:
_lowercase = []
_lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
_lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
_lowercase = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
_lowercase = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main() | 67 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
"""simple docstring"""
def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
_lowercase = parent
_lowercase = out_indices if out_indices is not None else [4]
_lowercase = stage_names
_lowercase = out_features
_lowercase = backbone
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = use_pretrained_backbone
_lowercase = is_training
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
_lowercase = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(__A )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
_lowercase = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=snake_case__ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=snake_case__ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=snake_case__ )
return parser.parse_args()
def SCREAMING_SNAKE_CASE__ ( ) -> int:
_lowercase = parse_args()
# Import training_script as a module.
_lowercase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_lowercase = script_fpath.stem
_lowercase = importlib.import_module(snake_case__ )
# Patch sys.argv
_lowercase = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main() | 67 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> list:
_lowercase = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
_lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase = j
return prefix_result
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 1 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
torch.manual_seed(0 )
_lowercase = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.dummy_uncond_unet
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
_lowercase = image[0, -3:, -3:, -1]
_lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_lowercase = 'google/ncsnpp-celebahq-256'
_lowercase = UNetaDModel.from_pretrained(__A )
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
_lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Union[str, Any]:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase = s - 2 * j
break
return diff | 67 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
snake_case = get_logger()
snake_case = None
class A_ ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
"""simple docstring"""
def __init__( self : List[Any] ,__A : Tuple=None ,__A : int=None ,**__A : Any ) -> str:
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A ,__A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowercase = device if isinstance(__A ,__A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowercase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowercase = str(jax.devices()[0] )
_lowercase = jnp_array_kwargs
@staticmethod
def __UpperCAmelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(__A ): device for device in jax.devices()}
def __UpperCAmelCase ( self : Union[str, Any] ,__A : Optional[int] ) -> Optional[int]:
import jax
import jax.numpy as jnp
if isinstance(__A ,__A ) and column:
if all(
isinstance(__A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A ,axis=0 )
return column
def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ) -> Dict:
import jax
import jax.numpy as jnp
if isinstance(__A ,(str, bytes, type(__A )) ):
return value
elif isinstance(__A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowercase = {}
if isinstance(__A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowercase = {'dtype': jnp.intaa}
else:
_lowercase = {'dtype': jnp.intaa}
elif isinstance(__A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowercase = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A ,PIL.Image.Image ):
_lowercase = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowercase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __UpperCAmelCase ( self : Optional[Any] ,__A : int ) -> List[str]:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A ,'__array__' ) and not isinstance(__A ,jax.Array ):
_lowercase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A ,np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def __UpperCAmelCase ( self : Optional[Any] ,__A : dict ) -> Tuple:
return map_nested(self._recursive_tensorize ,__A ,map_list=__A )
def __UpperCAmelCase ( self : List[Any] ,__A : pa.Table ) -> Mapping:
_lowercase = self.numpy_arrow_extractor().extract_row(__A )
_lowercase = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def __UpperCAmelCase ( self : Union[str, Any] ,__A : pa.Table ) -> "jax.Array":
_lowercase = self.numpy_arrow_extractor().extract_column(__A )
_lowercase = self.python_features_decoder.decode_column(__A ,pa_table.column_names[0] )
_lowercase = self.recursive_tensorize(__A )
_lowercase = self._consolidate(__A )
return column
def __UpperCAmelCase ( self : Dict ,__A : pa.Table ) -> Mapping:
_lowercase = self.numpy_arrow_extractor().extract_batch(__A )
_lowercase = self.python_features_decoder.decode_batch(__A )
_lowercase = self.recursive_tensorize(__A )
for column_name in batch:
_lowercase = self._consolidate(batch[column_name] )
return batch | 67 |
from manim import *
class A_ ( UpperCAmelCase ):
    """Manim scene animating big-model inference with CPU/GPU offload.

    Draws three memory banks (CPU, GPU, Disk) plus the model's layer cells, then
    animates an input square moving layer-by-layer while each layer's weights
    hop from CPU to GPU and back.

    NOTE(review): many positional arguments below are the name ``__A``, which is
    not defined in this block — presumably a direction/color constant pulled in
    by ``from manim import *`` or defined elsewhere in the file; confirm it
    resolves before running the scene.
    """
    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Building blocks: a memory cell, a borderless fill cell, and a smaller
        # "meta" cell used for the disk bank.
        _lowercase = Rectangle(height=0.5 ,width=0.5 )
        _lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
        _lowercase = Rectangle(height=0.25 ,width=0.25 )
        # CPU bank: two rows of six cells plus a label.
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
        _lowercase = Text('CPU' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__A )
        # GPU bank: a single row of four cells.
        _lowercase = [mem.copy() for i in range(4 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = Text('GPU' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        gpu.move_to([-1, -1, 0] )
        self.add(__A )
        # The model itself: six layer cells.
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = Text('Model' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        model.move_to([3, -1.0, 0] )
        self.add(__A )
        # Colour each model layer and mirror it with a filled cell on the CPU bank.
        _lowercase = []
        _lowercase = []
        for i, rect in enumerate(__A ):
            _lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
            target.move_to(__A )
            model_arr.append(__A )
            _lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__A )
        self.add(*__A ,*__A )
        # Disk bank: two rows of six smaller "meta" cells.
        _lowercase = [meta_mem.copy() for i in range(6 )]
        _lowercase = [meta_mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
        _lowercase = Text('Disk' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        disk.move_to([-4, -1.25, 0] )
        self.add(__A ,__A )
        # Legend in the top-left corner.
        _lowercase = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _lowercase = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
        key_text.move_to([-5, 2.4, 0] )
        self.add(__A ,__A )
        _lowercase = MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
        blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
        self.add(__A )
        # Step 1: narrate and introduce the input square.
        _lowercase = MarkupText(
            F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ) )
        _lowercase = Square(0.3 )
        input.set_fill(__A ,opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] ,__A ,buff=0.5 )
        self.play(Write(__A ) )
        input.generate_target()
        input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
        self.play(MoveToTarget(__A ) )
        self.play(FadeOut(__A ) )
        # Step 2: the first layer's weights move from CPU to GPU when the hook fires.
        _lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
        a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        _lowercase = MarkupText(
            F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ,run_time=3 ) )
        _lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        _lowercase = a.copy()
        # Walk the input across all six layers, swapping each layer on/off the GPU.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            _lowercase = AnimationGroup(
                FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
            self.play(__A )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    # speed up after the first hand-off
                    _lowercase = 0.7
                self.play(
                    Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
            else:
                # Last layer: weights return to the CPU and the input exits the model.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        _lowercase = a_c
        _lowercase = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
        self.play(
            FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
        # Closing caption.
        _lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
        self.wait()
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( tf_checkpoint_path :str , mobilebert_config_file :str , pytorch_dump_path :str ) -> None:
    """Convert a TensorFlow MobileBERT checkpoint to a PyTorch weights file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        mobilebert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the resulting PyTorch state dict.

    Note: the previous signature declared the same name (``snake_case__``) for
    all three parameters — a ``SyntaxError`` — and the body read the undefined
    names ``config`` and ``pytorch_dump_path``. The call site passes the three
    paths positionally, so renaming the parameters is safe.
    """
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = MobileBertForPreTraining(config )
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    # The previous version bound the parser and the parsed namespace to
    # `snake_case` but then read the undefined names `parser` and `args`, and
    # finally called `convert_tf_checkpoint_to_pytorch`, which does not exist in
    # this module — the converter is defined above as `SCREAMING_SNAKE_CASE__`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--mobilebert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained MobileBERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    SCREAMING_SNAKE_CASE__(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
    """Tester that builds tiny configs and inputs for the `TimmBackbone` tests.

    The previous definition could not even be parsed: every method re-used the
    parameter name ``__A`` (a ``SyntaxError``), and the bodies read attribute
    and local names (``parent``, ``config_and_inputs``, ``model`` …) that were
    never bound. Method names are restored from their call sites in this file
    (``self.prepare_config_and_inputs()``, ``self.get_config()``,
    ``self.model_tester.prepare_config_and_inputs_for_common()``).
    """

    def __init__(
        self ,
        parent ,
        out_indices=None ,
        stage_names=None ,
        out_features=None ,
        backbone='resnet50' ,
        batch_size=3 ,
        image_size=32 ,
        num_channels=3 ,
        use_pretrained_backbone=True ,
        is_training=True ,
    ) -> None:
        self.parent = parent
        # default to the last stage when the caller does not pin specific indices
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a backbone config plus a random pixel_values batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a `TimmBackboneConfig` from the tester's settings."""
        return TimmBackboneConfig(
            image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)

    def create_and_check_model(self, config, pixel_values):
        """Instantiate the backbone and check the last feature-map shape.

        NOTE(review): the attribute read here is ``feature_map`` (singular) while
        the test class below reads ``result.feature_maps`` — confirm which one
        the model output actually exposes.
        """
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Test-suite shell for `TimmBackbone` (backbone / model / pipeline mixins).

    NOTE(review): several things here need confirming against the upstream test
    file before this can run:
    * every test method is called ``__UpperCAmelCase``, so each later ``def``
      shadows the previous one and only the final method survives on the class;
    * several bodies reference ``__A`` although no such parameter or global is
      defined in view;
    * ``setUp`` instantiates ``TimmBackboneModelTester``, which is not defined
      in this module (the tester class above is named ``A_``).
    """
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    SCREAMING_SNAKE_CASE_ : List[Any] = False
    SCREAMING_SNAKE_CASE_ : Tuple = False
    SCREAMING_SNAKE_CASE_ : List[str] = False
    SCREAMING_SNAKE_CASE_ : Any = False
    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        # setUp: build the model tester and the config tester.
        _lowercase = TimmBackboneModelTester(self )
        _lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
    def __UpperCAmelCase ( self : int ) -> Tuple:
        # Exercise the full round-trip surface of the config class.
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
        # Timm-backed and transformers-native backbones should expose equivalent
        # features, stages and channels for the same architecture.
        _lowercase = 'resnet18'
        _lowercase = 'microsoft/resnet-18'
        _lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
        _lowercase = AutoBackbone.from_pretrained(__A )
        self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels ,transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices ,(-1,) )
        self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
        _lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
        _lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels ,transformers_model.channels )
    # --- common-mixin tests that do not apply to timm backbones are skipped ---
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        pass
    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        pass
    @unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        pass
    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        pass
    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        pass
    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        pass
    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : int ) -> Any:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
        pass
    @unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def __UpperCAmelCase ( self : Any ) -> Any:
        pass
    @unittest.skip('Safetensors is not supported by timm.' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def __UpperCAmelCase ( self : int ) -> Optional[Any]:
        pass
    def __UpperCAmelCase ( self : Dict ) -> int:
        # forward() should accept `pixel_values` as its first argument.
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(__A )
            _lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowercase = [*signature.parameters.keys()]
            _lowercase = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,__A )
    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        # Gradient retention: hidden states (and attentions, if any) must keep
        # their grads after a backward pass.
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase = True
        _lowercase = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        _lowercase = self.all_model_classes[0]
        _lowercase = model_class(__A )
        model.to(__A )
        _lowercase = self._prepare_for_class(__A ,__A )
        _lowercase = model(**__A )
        _lowercase = outputs[0][-1]
        # Encoder-/Decoder-only models
        _lowercase = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            _lowercase = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=__A )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def __UpperCAmelCase ( self : List[str] ) -> int:
        # Feature maps / channels must track `out_indices`, and default to the
        # last stage when no indices are requested.
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(__A )
            model.to(__A )
            model.eval()
            _lowercase = model(**__A )
            self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
            self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            _lowercase = copy.deepcopy(__A )
            _lowercase = None
            _lowercase = model_class(__A )
            model.to(__A )
            model.eval()
            _lowercase = model(**__A )
            self.assertEqual(len(result.feature_maps ) ,1 )
            self.assertEqual(len(model.channels ) ,1 )
            # Check backbone can be initialized with fresh weights
            _lowercase = copy.deepcopy(__A )
            _lowercase = False
            _lowercase = model_class(__A )
            model.to(__A )
            model.eval()
            _lowercase = model(**__A )
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class A_ ( UpperCAmelCase , unittest.TestCase ):
    """Tests for `BertJapaneseTokenizer` with word-level tokenizers
    (MeCab / Sudachi / Juman++) and the WordPiece sub-tokenizer.

    NOTE(review): all test methods are named ``__UpperCAmelCase`` so each later
    ``def`` shadows the previous one, and some bodies read names that are never
    bound in their scope (e.g. ``tokenizer``, ``text``, ``ids`` in the helper
    below) — confirm against the upstream test file.
    """
    SCREAMING_SNAKE_CASE_ : Optional[int] = BertJapaneseTokenizer
    SCREAMING_SNAKE_CASE_ : Any = False
    SCREAMING_SNAKE_CASE_ : str = True
    def __UpperCAmelCase ( self : int ) -> Optional[int]:
        # setUp: write a toy vocabulary file for the wordpiece tokenizer.
        super().setUp()
        _lowercase = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'こんにちは',
            'こん',
            'にちは',
            'ばんは',
            '##こん',
            '##にちは',
            '##ばんは',
            '世界',
            '##世界',
            '、',
            '##、',
            '。',
            '##。',
        ]
        _lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def __UpperCAmelCase ( self : int ,__A : Dict ) -> Union[str, Any]:
        # (input_text, expected tokenized output) pair used by the mixin.
        _lowercase = 'こんにちは、世界。 \nこんばんは、世界。'
        _lowercase = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text
    def __UpperCAmelCase ( self : Optional[Any] ,__A : Union[str, Any] ) -> List[str]:
        # NOTE(review): `tokenizer`, `text` and `ids` are unbound here.
        _lowercase , _lowercase = self.get_input_output_texts(__A )
        _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
        _lowercase = tokenizer.decode(__A ,clean_up_tokenization_spaces=__A )
        return text, ids
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
        pass  # TODO add if relevant
    def __UpperCAmelCase ( self : Dict ) -> str:
        pass  # TODO add if relevant
    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        pass  # TODO add if relevant
    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        # Default word tokenizer splits then wordpiece-splits.
        _lowercase = self.tokenizer_class(self.vocab_file )
        _lowercase = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
        self.assertListEqual(__A ,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        # MeCab-backed tokenizer must survive a pickle round-trip.
        _lowercase = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='mecab' )
        self.assertIsNotNone(__A )
        _lowercase = 'こんにちは、世界。\nこんばんは、世界。'
        _lowercase = tokenizer.tokenize(__A )
        self.assertListEqual(__A ,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
        _lowercase = os.path.join(self.tmpdirname ,'tokenizer.bin' )
        with open(__A ,'wb' ) as handle:
            pickle.dump(__A ,__A )
        with open(__A ,'rb' ) as handle:
            _lowercase = pickle.load(__A )
        _lowercase = tokenizer_new.tokenize(__A )
        self.assertListEqual(__A ,__A )
    def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        _lowercase = MecabTokenizer(mecab_dic='ipadic' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
    def __UpperCAmelCase ( self : Optional[int] ) -> str:
        # unidic_lite is optional; silently skip when not installed.
        try:
            _lowercase = MecabTokenizer(mecab_dic='unidic_lite' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        # unidic is optional; silently skip when not installed.
        try:
            _lowercase = MecabTokenizer(mecab_dic='unidic' )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
    def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        _lowercase = MecabTokenizer(do_lower_case=__A ,mecab_dic='ipadic' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
    def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
        # jumandic for MeCab is optional; skip when the dictionary is absent.
        try:
            _lowercase = MecabTokenizer(
                do_lower_case=__A ,normalize_text=__A ,mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] ,)
    def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
        _lowercase = MecabTokenizer(normalize_text=__A ,mecab_dic='ipadic' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] ,)
    @require_sudachi
    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # Sudachi-backed tokenizer must survive a pickle round-trip.
        _lowercase = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='sudachi' )
        self.assertIsNotNone(__A )
        _lowercase = 'こんにちは、世界。\nこんばんは、世界。'
        _lowercase = tokenizer.tokenize(__A )
        self.assertListEqual(__A ,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
        _lowercase = os.path.join(self.tmpdirname ,'tokenizer.bin' )
        with open(__A ,'wb' ) as handle:
            pickle.dump(__A ,__A )
        with open(__A ,'rb' ) as handle:
            _lowercase = pickle.load(__A )
        _lowercase = tokenizer_new.tokenize(__A )
        self.assertListEqual(__A ,__A )
    @require_sudachi
    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        _lowercase = SudachiTokenizer(sudachi_dict_type='core' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,[' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] ,)
    # Sudachi split modes A/B/C produce increasingly coarse segmentations.
    @require_sudachi
    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        _lowercase = SudachiTokenizer(sudachi_dict_type='core' ,sudachi_split_mode='A' )
        self.assertListEqual(tokenizer.tokenize('外国人参政権' ) ,['外国', '人', '参政', '権'] )
    @require_sudachi
    def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        _lowercase = SudachiTokenizer(sudachi_dict_type='core' ,sudachi_split_mode='B' )
        self.assertListEqual(tokenizer.tokenize('外国人参政権' ) ,['外国人', '参政権'] )
    @require_sudachi
    def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        _lowercase = SudachiTokenizer(sudachi_dict_type='core' ,sudachi_split_mode='C' )
        self.assertListEqual(tokenizer.tokenize('外国人参政権' ) ,['外国人参政権'] )
    @require_sudachi
    def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
        _lowercase = SudachiTokenizer(do_lower_case=__A ,sudachi_dict_type='core' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,[' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] ,)
    @require_sudachi
    def __UpperCAmelCase ( self : Dict ) -> Any:
        _lowercase = SudachiTokenizer(normalize_text=__A ,sudachi_dict_type='core' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,[' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] ,)
    @require_sudachi
    def __UpperCAmelCase ( self : int ) -> List[Any]:
        _lowercase = SudachiTokenizer(trim_whitespace=__A ,sudachi_dict_type='core' )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
    @require_jumanpp
    def __UpperCAmelCase ( self : List[str] ) -> List[str]:
        # Juman++-backed tokenizer must survive a pickle round-trip.
        _lowercase = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='jumanpp' )
        self.assertIsNotNone(__A )
        _lowercase = 'こんにちは、世界。\nこんばんは、世界。'
        _lowercase = tokenizer.tokenize(__A )
        self.assertListEqual(__A ,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
        _lowercase = os.path.join(self.tmpdirname ,'tokenizer.bin' )
        with open(__A ,'wb' ) as handle:
            pickle.dump(__A ,__A )
        with open(__A ,'rb' ) as handle:
            _lowercase = pickle.load(__A )
        _lowercase = tokenizer_new.tokenize(__A )
        self.assertListEqual(__A ,__A )
    @require_jumanpp
    def __UpperCAmelCase ( self : int ) -> Dict:
        _lowercase = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] ,)
    @require_jumanpp
    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        _lowercase = JumanppTokenizer(do_lower_case=__A )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] ,)
    @require_jumanpp
    def __UpperCAmelCase ( self : List[str] ) -> Tuple:
        _lowercase = JumanppTokenizer(normalize_text=__A )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] ,)
    @require_jumanpp
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        _lowercase = JumanppTokenizer(trim_whitespace=__A )
        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が  \n 発売された 。  ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] ,)
    @require_jumanpp
    def __UpperCAmelCase ( self : Tuple ) -> Any:
        _lowercase = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) ,['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] ,)
    def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
        # WordPiece tokenizer on a toy vocabulary, including the [UNK] fallback.
        _lowercase = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
        _lowercase = {}
        for i, token in enumerate(__A ):
            _lowercase = i
        _lowercase = WordpieceTokenizer(vocab=__A ,unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) ,[] )
        self.assertListEqual(tokenizer.tokenize('こんにちは' ) ,['こんにちは'] )
        self.assertListEqual(tokenizer.tokenize('こんばんは' ) ,['こん', '##ばんは'] )
        self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) ,['こん', '##ばんは', '[UNK]', 'こんにちは'] )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        # Sentencepiece subword tokenizer loaded from a remote checkpoint.
        _lowercase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
        _lowercase = tokenizer.subword_tokenizer
        _lowercase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
        self.assertListEqual(__A ,['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
        _lowercase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
        self.assertListEqual(__A ,['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        # Special-token layout for single sentences and sentence pairs.
        _lowercase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
        _lowercase = tokenizer.encode('ありがとう。' ,add_special_tokens=__A )
        _lowercase = tokenizer.encode('どういたしまして。' ,add_special_tokens=__A )
        _lowercase = tokenizer.build_inputs_with_special_tokens(__A )
        _lowercase = tokenizer.build_inputs_with_special_tokens(__A ,__A )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class A_ ( UpperCAmelCase , unittest.TestCase ):
    """Tests for `BertJapaneseTokenizer` with the character-level sub-tokenizer.

    NOTE(review): as above, all test methods share the name ``__UpperCAmelCase``
    (later defs shadow earlier ones) and several bodies reference ``__A``
    without it being bound — confirm against the upstream test file.
    """
    SCREAMING_SNAKE_CASE_ : Optional[int] = BertJapaneseTokenizer
    SCREAMING_SNAKE_CASE_ : Optional[Any] = False
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
        # setUp: write a toy per-character vocabulary file.
        super().setUp()
        _lowercase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        _lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def __UpperCAmelCase ( self : Optional[Any] ,**__A : Dict ) -> int:
        # Factory for a character-level tokenizer over the temp vocab dir.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type='character' ,**__A )
    def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ) -> int:
        # (input_text, expected tokenized output) pair used by the mixin.
        _lowercase = 'こんにちは、世界。 \nこんばんは、世界。'
        _lowercase = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text
    def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
        pass  # TODO add if relevant
    def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        pass  # TODO add if relevant
    def __UpperCAmelCase ( self : Any ) -> Any:
        pass  # TODO add if relevant
    def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        # Full tokenizer splits into single characters and maps to vocab ids.
        _lowercase = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type='character' )
        _lowercase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
        self.assertListEqual(
            __A ,['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__A ) ,[3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
    def __UpperCAmelCase ( self : List[str] ) -> int:
        # CharacterTokenizer on a toy vocabulary, including the [UNK] fallback.
        _lowercase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        _lowercase = {}
        for i, token in enumerate(__A ):
            _lowercase = i
        _lowercase = CharacterTokenizer(vocab=__A ,unk_token='[UNK]' )
        self.assertListEqual(tokenizer.tokenize('' ) ,[] )
        self.assertListEqual(tokenizer.tokenize('こんにちは' ) ,['こ', 'ん', 'に', 'ち', 'は'] )
        self.assertListEqual(tokenizer.tokenize('こんにちほ' ) ,['こ', 'ん', 'に', 'ち', '[UNK]'] )
    def __UpperCAmelCase ( self : Tuple ) -> int:
        # Special-token layout for single sentences and sentence pairs.
        _lowercase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
        _lowercase = tokenizer.encode('ありがとう。' ,add_special_tokens=__A )
        _lowercase = tokenizer.encode('どういたしまして。' ,add_special_tokens=__A )
        _lowercase = tokenizer.build_inputs_with_special_tokens(__A )
        _lowercase = tokenizer.build_inputs_with_special_tokens(__A ,__A )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class A_ ( unittest.TestCase ):
    """Check that `AutoTokenizer` resolves a Japanese BERT checkpoint to
    `BertJapaneseTokenizer`.

    The previous body called ``AutoTokenizer.from_pretrained(__A)`` and
    ``assertIsInstance(__A, __A)`` with ``__A`` undefined in this scope; the
    checkpoint name and the loaded tokenizer are now threaded through locals.
    """

    def __UpperCAmelCase ( self : int ) -> int:
        _lowercase = 'cl-tohoku/bert-base-japanese'
        _lowercase = AutoTokenizer.from_pretrained(_lowercase )
        self.assertIsInstance(_lowercase ,BertJapaneseTokenizer )
class A_ ( unittest.TestCase ):
    """Loading a checkpoint with a mismatched tokenizer class must warn.

    The previous body called ``from_pretrained(__A)`` with ``__A`` undefined in
    this scope; the checkpoint name is now passed via the local it was assigned
    to. Both directions are checked: a Japanese checkpoint through
    ``BertTokenizer`` and an English checkpoint through ``BertJapaneseTokenizer``.
    """

    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        _lowercase = 'cl-tohoku/bert-base-japanese'
        with self.assertLogs('transformers' ,level='WARNING' ) as cm:
            BertTokenizer.from_pretrained(_lowercase )
        self.assertTrue(
            cm.records[0].message.startswith(
                'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                ' is called from.' ) )
        _lowercase = 'bert-base-cased'
        with self.assertLogs('transformers' ,level='WARNING' ) as cm:
            BertJapaneseTokenizer.from_pretrained(_lowercase )
        self.assertTrue(
            cm.records[0].message.startswith(
                'The tokenizer class you load from this checkpoint is not the same type as the class this function'
                ' is called from.' ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding: map submodule name -> exported symbols so the heavy
# torch-based modeling module is only imported on first attribute access.
# The previous version bound the dict to the placeholder `snake_case`, then
# REBOUND the same name to the modeling list (clobbering the dict) and finally
# passed `_import_structure` — which was never defined — to `_LazyModule`.
_import_structure = {
    """configuration_nllb_moe""": [
        """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """NllbMoeConfig""",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration symbols
    pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
        """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NllbMoeForConditionalGeneration""",
        """NllbMoeModel""",
        """NllbMoePreTrainedModel""",
        """NllbMoeTop2Router""",
        """NllbMoeSparseMLP""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): the export list above says "NllbMoeTop2Router" but this
        # import pulls `NllbMoeTopaRouter` — confirm which name
        # `modeling_nllb_moe` actually defines.
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTopaRouter,
        )
else:
    import sys

    # Replace this module with a lazy proxy at runtime.
    snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' ,'False' ) ) is not True ,reason='Skipping test because should only be run when releasing minor transformers version' ,)
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue_model_parallelism.py',
            'model_name_or_path': 'roberta-large',
            'instance_type': 'ml.p3dn.24xlarge',
            'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
        },
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'roberta-large',
            'instance_type': 'ml.p3dn.24xlarge',
            'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
        },
    ] )
class A_ ( unittest.TestCase ):
    """End-to-end SageMaker test: fine-tunes GLUE with smdistributed model parallelism
    and checks runtime/accuracy/loss KPIs against the parameterized expectations."""

    def setUp(self) -> None:
        """Copy the training script into the SageMaker test path (needs the ``sm_env`` fixture)."""
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count: int) -> "HuggingFace":
        """Build a HuggingFace estimator configured for smdistributed Model Parallel."""
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name: str) -> None:
        """Export the CloudWatch training metrics of ``job_name`` to a CSV in the test path."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count: int) -> None:
        """Run the training job and assert the KPIs; dumps results to JSON for the PR."""
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
# Precomputed fifth power of every decimal digit, keyed by the digit character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``.

    >>> digits_fifth_powers_sum(1234)
    1300
    """
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum all numbers equal to the sum of fifth powers of their digits.

    Single-digit numbers are trivial non-sums, and 6 * 9**5 = 354294 bounds any
    candidate, so scanning [1000, 1_000_000) is sufficient.
    """
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url: str):
    """Build a hybrid-backbone DPTConfig matching the checkpoint named in the URL.

    Returns:
        (config, expected_shape): model config and the expected output shape.
    """
    config = DPTConfig(embedding_type="hybrid")
    # All known checkpoints are 384x384; ADE overrides this below.
    expected_shape = (1, 384, 384)

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # NOTE(review): the original read ``if "nyu" or "midas" in checkpoint_url`` which is
    # always true; the intent is clearly a membership test on both substrings.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        # Keep the settings the always-true branch used to apply to ADE checkpoints.
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.readout_type = "project"
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        # Map ADE20K class ids to human-readable labels.
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict: dict) -> None:
    """Drop checkpoint entries that have no counterpart in the HF model (in place).

    Missing keys are ignored (``pop`` with a ``None`` default never raises).
    """
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name: str) -> str:
    """Translate one original DPT checkpoint key into its HF-model key.

    Each replacement feeds the next, so order matters throughout.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict: dict, config) -> None:
    """Split each fused timm qkv projection into separate query/key/value entries (in place)."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Convert an original DPT checkpoint into the HF format.

    Loads the checkpoint, renames/splits its weights, runs a forward sanity pass,
    and optionally saves locally and/or pushes to the Hub.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        # Upsample to the input resolution and display a normalized grayscale rendering.
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, by 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return ``n`` together with every left- and right-truncation of its digits.

    >>> list_truncated_nums(3797)
    [3797, 797, 379, 97, 37, 7, 3]
    """
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # drop i leading digits
        list_nums.append(int(str_num[:-i]))  # drop i trailing digits
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers with more than 3 digits, both the leading and
    trailing 3-digit chunks must themselves be prime for ``n`` to be truncatable."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first ``count`` truncatable primes (primes that stay prime under
    every left- and right-truncation); 2, 3, 5, 7 are excluded by convention."""
    list_truncated_primes: list[int] = []
    num = 13  # smallest candidate; even numbers can never qualify
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Project Euler 37: sum of the eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(1_1)) = }")
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

# NOTE(review): the original assigned both the logger and this map to the same name,
# clobbering the logger; restored the conventional distinct names.
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A_ ( UpperCAmelCase ):
    """Configuration for Perceiver models: latent array sizes, attention layout, and
    the task-specific (MLM / image / flow / multimodal) preprocessing attributes."""

    SCREAMING_SNAKE_CASE_ : str = '''perceiver'''  # model_type identifier used by Auto classes

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class A_ ( UpperCAmelCase ):
    """ONNX export configuration for Perceiver: dynamic input axes, validation
    tolerance, and dummy-input generation for tokenizer or image preprocessors."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Names of the dynamic axes for each exported input tensor."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # Perceiver feeds everything through a single generic "inputs" tensor.
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when comparing exported outputs to the PyTorch model.
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for export, renaming the model input key to "inputs"."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.'
            )
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
    """Helper that builds tiny UMT5 configs/inputs and runs shape, KV-cache, and
    fp16 checks on behalf of the unittest class below."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        """Config of the real pretrained checkpoint, for slow/integration use."""
        return TaConfig.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        """Fill in default attention/head masks for any that were not provided."""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        """Random token ids plus a tiny config, packaged as (config, inputs_dict)."""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        """Variant with a vocab large enough for pipeline tests."""
        return TaConfig(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return TaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Forward pass: check encoder/decoder output shapes and KV-cache structure."""
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """Check cached-decoding equivalence: with-cache output matches full recompute."""
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fpaa_forward(
        self,
        config,
        input_dict,
    ):
        """Half-precision forward pass must not produce NaNs."""
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class A_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model/generation/pipeline test suite for the tiny UMT5 models."""

    SCREAMING_SNAKE_CASE_ : List[str] = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )  # all_model_classes
    SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()  # generative classes
    SCREAMING_SNAKE_CASE_ : str = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )  # pipeline_model_mapping
    # NOTE(review): the following flags were name-mangled by obfuscation; values preserved,
    # intended names (is_encoder_decoder / fx_compatible / test_pruning / test_missing_keys /
    # test_torchscript) should be confirmed against the upstream test file.
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    SCREAMING_SNAKE_CASE_ : int = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : List[Any] = True
    SCREAMING_SNAKE_CASE_ : int = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]  # model_split_percents

    def setUp(self) -> None:
        self.model_tester = UMTaModelTester(self)

    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def test_export_to_onnx(self) -> None:
        """Exports the tiny model to ONNX (skipped: segfaults on torch 1.8.0)."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
    def test_model_fpaa_forward(self) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)

    def test_generate_with_head_masking(self) -> None:
        """Fully-masked heads must yield all-zero attention weights during generate()."""
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]['input_ids'],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def test_disk_offload(self) -> None:
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
    """Slow integration test: checks UMT5 tokenization ids and greedy generations
    against reference values recorded from google/umt5-small.

    NOTE(review): locals were machine-renamed to `_lowercase`/`__A`; reads like
    `input_ids` reference the original names — reconcile against upstream.
    """

    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def __UpperCAmelCase ( self : int ) -> List[str]:
        """Tokenize multilingual sentinel prompts, compare ids to a reference tensor,
        then generate and compare decoded strings to reference outputs."""
        _lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
        _lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
        # Multilingual prompts exercising <extra_id_*> sentinel tokens.
        _lowercase = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        _lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
        # Reference token ids produced by the original (google) implementation.
        # fmt: off
        _lowercase = torch.tensor(
            [
        [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
        [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
        [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
        [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
        [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        # NOTE(review): torch.testing.assert_allclose is deprecated in favor of
        # torch.testing.assert_close — consider updating when touching this test.
        torch.testing.assert_allclose(__A ,__A )
        _lowercase = model.generate(input_ids.to(__A ) )
        # Expected decoded strings for the greedy generations above.
        _lowercase = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        _lowercase = tokenizer.batch_decode(__A )
        self.assertEqual(__A ,__A )
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class A_ :
    """Node of a binary tree holding a float payload and optional children.

    FIX: the previous version declared one repeatedly-overwritten field name,
    so the traversal code below (which reads ``node.data``, ``node.left``,
    ``node.right``) raised AttributeError and required positional ``data``.
    """

    # Payload value compared by the BST check.
    data: float
    # Left child (values < data) or None for a leaf.
    left: "A_ | None" = None
    # Right child (values > data) or None for a leaf.
    right: "A_ | None" = None
def SCREAMING_SNAKE_CASE__ ( snake_case__ : "A_ | None" ) -> bool:
    """Return True iff the tree rooted at ``snake_case__`` is a valid binary search tree.

    First validates the structure (every node is an ``A_`` whose ``data`` is
    float-convertible), then checks the strict ordering invariant.

    Raises:
        ValueError: if any node fails structural validation.

    FIX: the previous version called ``isinstance(node, node)`` (TypeError at
    runtime), used an undefined name inside the validator, and passed the tree
    root instead of the inherited lower bound in the recursive ordering check.
    """

    def is_valid_tree(node: "A_ | None") -> bool:
        # Structural validation only; ordering is checked separately below.
        if node is None:
            return True
        if not isinstance(node, A_):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(snake_case__):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.' )

    def is_binary_search_tree_recursive_check(
        node: "A_ | None", left_bound: float, right_bound: float ) -> bool:
        # BST invariant: each value lies strictly between the bounds
        # inherited from its ancestors.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(snake_case__, -float('inf'), float('inf'))
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # run any doctests defined in this module
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue_model_parallelism.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
    ] )
class A_ ( unittest.TestCase ):
    """End-to-end SageMaker model-parallel training test (release-time only).

    Parameterized over two GLUE training scripts; launches a HuggingFace
    estimator on SageMaker and asserts runtime/accuracy/loss KPI thresholds.

    NOTE(review): locals were machine-renamed to `_lowercase`/`__A`; later reads
    (`smp_options`, `estimator`, `train_runtime`, ...) use the original names.
    """

    def __UpperCAmelCase ( self : Any ) -> Optional[int]:
        """Setup hook: copy the training script into the SageMaker test path."""
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
        assert hasattr(self ,'env' )

    def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
        """Build a HuggingFace estimator configured for smdistributed Model Parallel."""
        # configuration for running training on smdistributed Model Parallel
        _lowercase = {
            'enabled': True,
            'processes_per_host': 8,
        }
        _lowercase = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        _lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        _lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            } ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)

    def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
        """Export a finished training job's CloudWatch metrics to a CSV file."""
        TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(1,)] )
    def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
        """Launch training and assert the KPI thresholds declared in self.results."""
        # create estimator
        _lowercase = self.create_estimator(__A )
        # run training
        estimator.fit()
        # result dataframe
        _lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _lowercase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the chinese_clip subpackage: maps each submodule
# to the public names it exports. Optional groups are appended only when
# their backend (vision / torch) is importable.
# FIX: the previous version rebound a single variable for every group and then
# passed an undefined `_import_structure` to _LazyModule, raising NameError
# at import time.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real symbols eagerly.
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module-level logger for this file.
snake_case = logging.get_logger(__name__)
# NOTE(review): the next assignment rebinds the same name `snake_case`,
# clobbering the logger above — an automated-renaming artifact; upstream uses
# distinct names (logger vs. the pretrained config-archive map). Verify.
# Map: checkpoint name -> config.json URL.
snake_case = {
    """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
    """Configuration class for BlenderbotSmall-style encoder-decoder models.

    Stores architecture hyperparameters (vocab size, width, layer/head counts,
    dropouts) and forwards special-token ids to the base config.

    NOTE(review): the three class attributes below all share one (machine-renamed)
    name, so only the last binding survives; upstream declares `model_type`,
    `keys_to_ignore_at_inference` and `attribute_map` — verify against upstream.
    """

    SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
    SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
    SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
        """Store model hyperparameters; remaining kwargs go to PretrainedConfig.

        NOTE(review): assignments read original parameter names (vocab_size,
        d_model, ...) that the renamed signature no longer declares — the
        positional order here must match upstream exactly.
        """
        _lowercase = vocab_size
        _lowercase = max_position_embeddings
        _lowercase = d_model
        _lowercase = encoder_ffn_dim
        _lowercase = encoder_layers
        _lowercase = encoder_attention_heads
        _lowercase = decoder_ffn_dim
        _lowercase = decoder_layers
        _lowercase = decoder_attention_heads
        _lowercase = dropout
        _lowercase = attention_dropout
        _lowercase = activation_dropout
        _lowercase = activation_function
        _lowercase = init_std
        _lowercase = encoder_layerdrop
        _lowercase = decoder_layerdrop
        _lowercase = use_cache
        # num_hidden_layers mirrors encoder_layers upstream.
        _lowercase = encoder_layers
        _lowercase = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
    """ONNX export configuration for BlenderbotSmall (seq2seq-with-past style).

    Declares dynamic-axis specs for inputs/outputs and generates dummy inputs
    for tracing, covering the "default"/"seq2seq-lm" and "causal-lm" tasks
    with optional past_key_values.

    NOTE(review): locals were machine-renamed to `_lowercase`; later reads
    (`common_inputs`, `batch`, `encoder_shape`, ...) keep the original names —
    reconcile against the upstream OnnxSeq2SeqConfigWithPast implementation.
    """

    @property
    def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the model inputs, depending on task and use_past."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                # With cached keys/values the decoder consumes one new token.
                _lowercase = {0: 'batch'}
                _lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                _lowercase = {0: 'batch', 1: 'decoder_sequence'}
                _lowercase = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(__A ,direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                _lowercase , _lowercase = self.num_layers
                for i in range(__A ):
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs

    @property
    def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the model outputs (adds present key/values if used)."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase = super().outputs
        else:
            _lowercase = super(__A ,self ).outputs
            if self.use_past:
                _lowercase , _lowercase = self.num_layers
                for i in range(__A ):
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs

    def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (and zeroed past_key_values) for the
        default / seq2seq-lm tasks."""
        _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __A ,__A ,__A ,__A ,__A )
        # Generate decoder inputs
        _lowercase = seq_length if not self.use_past else 1
        _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __A ,__A ,__A ,__A ,__A )
        _lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        _lowercase = dict(**__A ,**__A )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            _lowercase , _lowercase = common_inputs['input_ids'].shape
            _lowercase = common_inputs['decoder_input_ids'].shape[1]
            _lowercase , _lowercase = self.num_attention_heads
            _lowercase = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # Decoder past length is padded (+3) relative to the dummy sequence.
            _lowercase = decoder_seq_length + 3
            _lowercase = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            _lowercase = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
            _lowercase = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            _lowercase , _lowercase = self.num_layers
            _lowercase = min(__A ,__A )
            _lowercase = max(__A ,__A ) - min_num_layers
            _lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(__A ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(__A ),
                        torch.zeros(__A ),
                        torch.zeros(__A ),
                        torch.zeros(__A ),
                    ) )
            # TODO: test this.
            _lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(__A ,__A ):
                common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
        return common_inputs

    def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        """Build dummy decoder-only inputs (and zeroed past_key_values) for causal-lm."""
        _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __A ,__A ,__A ,__A ,__A )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            _lowercase , _lowercase = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            _lowercase = seqlen + 2
            _lowercase , _lowercase = self.num_layers
            _lowercase , _lowercase = self.num_attention_heads
            _lowercase = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _lowercase = common_inputs['attention_mask'].dtype
            _lowercase = torch.cat(
                [common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
            _lowercase = [
                (torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
            ]
        return common_inputs

    def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        """Tokenize a fixed dummy batch sized around the requested batch/sequence dims."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _lowercase = compute_effective_axis_dimension(
            __A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowercase = tokenizer.num_special_tokens_to_add(__A )
        _lowercase = compute_effective_axis_dimension(
            __A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
        # Generate dummy inputs according to compute batch and sequence
        _lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        _lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
        return common_inputs

    def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        """Dispatch dummy-input generation according to the configured task."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
        elif self.task == "causal-lm":
            _lowercase = self._generate_dummy_inputs_for_causal_lm(
                __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
        else:
            _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
        return common_inputs

    def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
        """Flatten past_key_values naming, delegating to the task-appropriate base."""
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
        else:
            _lowercase = super(__A ,self )._flatten_past_key_values_(
                __A ,__A ,__A ,__A )
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff *matrix* equals its own conjugate transpose.

    FIX: the previous definition read an undefined name and did not match
    the ``is_hermitian`` call sites below.
    """
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix ``a`` and
    column vector ``v`` (as a 1x1 ndarray).

    FIX: the previous definition duplicated its parameter name (a SyntaxError)
    and did not match the ``rayleigh_quotient`` call sites below.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    """Exercise is_hermitian / rayleigh_quotient on two sample matrices.

    FIX: the previous definition read undefined names and did not match the
    ``tests()`` call in the main guard below.
    """
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    # Known eigen-direction: the quotient is exactly 3 for this pair.
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # run any doctests defined in this module
    tests()  # smoke-test the Rayleigh-quotient helpers
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()  # make torch/diffusers ops deterministic so image-slice assertions are reproducible
class A_ ( unittest.TestCase ):
    """Fast unit tests for KarrasVePipeline using a tiny seeded UNet.

    NOTE(review): locals were machine-renamed to `_lowercase`/`__A`; later
    reads (`model`, `pipe`, `image`, ...) use the original names.
    """

    @property
    def __UpperCAmelCase ( self : Any ) -> str:
        """Build a small, deterministically-initialized UNet2D for testing."""
        torch.manual_seed(0 )
        _lowercase = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
        return model

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Run 2 inference steps twice (dict and tuple return paths) and compare
        both outputs against a fixed reference slice."""
        _lowercase = self.dummy_uncond_unet
        _lowercase = KarrasVeScheduler()
        _lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
        pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        # Same seed for both runs so the images must match.
        _lowercase = torch.manual_seed(0 )
        _lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
        _lowercase = torch.manual_seed(0 )
        _lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
        _lowercase = image[0, -3:, -3:, -1]
        _lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test for KarrasVePipeline on a real pretrained checkpoint."""

    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Full 20-step sampling run; checks output shape and a reference slice.

        NOTE(review): locals were machine-renamed; reads like `pipe`/`image`
        reference the original names.
        """
        _lowercase = 'google/ncsnpp-celebahq-256'
        _lowercase = UNetaDModel.from_pretrained(__A )
        _lowercase = KarrasVeScheduler()
        _lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
        pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        # Fixed seed so the expected slice below is reproducible.
        _lowercase = torch.manual_seed(0 )
        _lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
        _lowercase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the blenderbot_small subpackage: maps each
# submodule to the public names it exports. Optional groups are appended only
# when their backend (tokenizers / torch / tf / flax) is importable.
# FIX: the previous version rebound a single variable for every group and then
# passed an undefined `_import_structure` to _LazyModule, raising NameError
# at import time.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real symbols eagerly.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return all start indices at which ``pattern`` occurs in ``s``.

    Naive O(len(s) * len(pattern)) sliding-window scan; an empty pattern
    matches at every position (including len(s)).

    FIX: the previous definition duplicated its parameter name (a
    SyntaxError), read undefined locals, and did not match the
    ``naive_pattern_search`` call sites below.
    """
    pat_len = len(pattern)
    position = []
    # Try every feasible alignment of the pattern against s.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
    # Quick self-check followed by a demo run.
    assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
    print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
from __future__ import annotations
import requests
# Post fields that may be requested via `wanted_data` in get_subreddit_data().
# FIX: this set is read below as `valid_terms`; the previous (renamed) binding
# left that reference undefined.
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via Reddit's public JSON endpoint.

    Args:
        subreddit: subreddit name (without the ``r/`` prefix).
        limit: number of posts to fetch/return.
        age: listing to query ("new", "top", "hot", ...).
        wanted_data: subset of ``valid_terms`` fields to extract per post;
            if falsy, the raw post payloads are returned instead.

    Returns:
        Mapping of post index -> raw payload (or the requested field subset).

    Raises:
        ValueError: if ``wanted_data`` contains a field not in ``valid_terms``.
        requests.HTTPError: on HTTP 429 (rate limited).

    FIX: the previous definition duplicated its parameter name (a SyntaxError),
    read undefined locals, and did not match the ``get_subreddit_data`` call
    site below.
    """
    wanted_data = wanted_data or []
    # Reject unknown fields up front so the extraction loop cannot KeyError.
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={'User-agent': 'A random string'},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data['data']['children'][id_]['data'][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, you are rate limited — try again after some time.
    print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff *matrix* equals its own conjugate transpose.

    FIX: the previous definition read an undefined name and did not match
    the ``is_hermitian`` call sites below.
    """
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix ``a`` and
    column vector ``v`` (as a 1x1 ndarray).

    FIX: the previous definition duplicated its parameter name (a SyntaxError)
    and did not match the ``rayleigh_quotient`` call sites below.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    """Exercise is_hermitian / rayleigh_quotient on two sample matrices.

    FIX: the previous definition read undefined names and did not match the
    ``tests()`` call in the main guard below.
    """
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    # Known eigen-direction: the quotient is exactly 3 for this pair.
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # run any doctests defined in this module
    tests()  # smoke-test the Rayleigh-quotient helpers
def SCREAMING_SNAKE_CASE__ ( a: int ) -> int:
    """Return the number of set bits (1s) in the binary representation of ``a``.

    Raises:
        TypeError: if ``a`` is not an int (checked first so the comparison
            below cannot fail on non-numeric input).
        ValueError: if ``a`` is negative.

    FIX: the previous version read an undefined name and called
    ``isinstance(x, x)``, which raises TypeError on any input.
    """
    if not isinstance(a, int):
        raise TypeError('Input value must be a \'int\' type' )
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    return bin(a).count('1' )
if __name__ == "__main__":
    import doctest

    doctest.testmod()  # run any doctests defined in this module
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
    """Test helper holding Deta image-processor hyperparameters and computing the
    resize shapes the processor is expected to produce.

    NOTE(review): locals/params were machine-renamed (`__A`, `_lowercase`);
    reads like `size`, `parent`, `image` reference the original names —
    reconcile against the upstream tester.
    """

    def __init__( self : Tuple ,__A : Dict ,__A : List[Any]=7 ,__A : Dict=3 ,__A : Tuple=30 ,__A : Dict=400 ,__A : Any=True ,__A : List[Any]=None ,__A : Any=True ,__A : List[str]=[0.5, 0.5, 0.5] ,__A : Union[str, Any]=[0.5, 0.5, 0.5] ,__A : int=True ,__A : List[str]=1 / 255 ,__A : Union[str, Any]=True ,) -> List[Any]:
        """Store batch/channel/resolution settings plus processor flags."""
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        _lowercase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = num_channels
        _lowercase = min_resolution
        _lowercase = max_resolution
        _lowercase = do_resize
        _lowercase = size
        _lowercase = do_normalize
        _lowercase = image_mean
        _lowercase = image_std
        _lowercase = do_rescale
        _lowercase = rescale_factor
        _lowercase = do_pad

    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ,__A : List[str]=False ) -> Union[str, Any]:
        """Compute the (height, width) the processor should resize inputs to,
        preserving aspect ratio via size['shortest_edge']; for batched inputs,
        returns the per-batch maxima (the padded shape)."""
        if not batched:
            _lowercase = image_inputs[0]
            if isinstance(__A ,Image.Image ):
                _lowercase , _lowercase = image.size
            else:
                _lowercase , _lowercase = image.shape[1], image.shape[2]
            # Scale so the shorter side hits size['shortest_edge'].
            if w < h:
                _lowercase = int(self.size['shortest_edge'] * h / w )
                _lowercase = self.size['shortest_edge']
            elif w > h:
                _lowercase = self.size['shortest_edge']
                _lowercase = int(self.size['shortest_edge'] * w / h )
            else:
                _lowercase = self.size['shortest_edge']
                _lowercase = self.size['shortest_edge']
        else:
            _lowercase = []
            for image in image_inputs:
                _lowercase , _lowercase = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # Padded batch shape = max height and max width across the batch.
            _lowercase = max(__A ,key=lambda __A : item[0] )[0]
            _lowercase = max(__A ,key=lambda __A : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
    """Setup hook: create the shared tester that provides configs/inputs."""
    # NOTE(review): upstream assigns this to self.image_processor_tester; the
    # throwaway local looks like a renaming artifact — verify.
    _lowercase = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
    """Kwargs dict for instantiating the image processor under test."""
    return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
    """The instantiated processor exposes every expected config attribute."""
    _lowercase = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(__A ,'image_mean' ) )
    self.assertTrue(hasattr(__A ,'image_std' ) )
    self.assertTrue(hasattr(__A ,'do_normalize' ) )
    self.assertTrue(hasattr(__A ,'do_resize' ) )
    self.assertTrue(hasattr(__A ,'do_rescale' ) )
    self.assertTrue(hasattr(__A ,'do_pad' ) )
    self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
    """from_dict round-trip preserves `size` and `do_pad` settings."""
    _lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
    self.assertEqual(image_processor.do_pad ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
    """Intentionally empty — presumably overrides an inherited mixin test
    that does not apply here (TODO: confirm against upstream)."""
    pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) ) | 67 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''Speech2TextFeatureExtractor'''
SCREAMING_SNAKE_CASE_ : int = '''Speech2TextTokenizer'''
def __init__( self : Dict ,__A : Dict ,__A : str ) -> Dict:
super().__init__(__A ,__A )
_lowercase = self.feature_extractor
_lowercase = False
def __call__( self : Any ,*__A : List[str] ,**__A : List[Any] ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A ,**__A )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
_lowercase = kwargs.pop('raw_speech' )
else:
_lowercase = kwargs.pop('audio' ,__A )
_lowercase = kwargs.pop('sampling_rate' ,__A )
_lowercase = kwargs.pop('text' ,__A )
if len(__A ) > 0:
_lowercase = args[0]
_lowercase = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
_lowercase = self.feature_extractor(__A ,*__A ,sampling_rate=__A ,**__A )
if text is not None:
_lowercase = self.tokenizer(__A ,**__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowercase = encodings['input_ids']
return inputs
def __UpperCAmelCase ( self : Dict ,*__A : Optional[int] ,**__A : Any ) -> Dict:
return self.tokenizer.batch_decode(*__A ,**__A )
def __UpperCAmelCase ( self : Optional[Any] ,*__A : str ,**__A : int ) -> str:
return self.tokenizer.decode(*__A ,**__A )
@contextmanager
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
_lowercase = True
_lowercase = self.tokenizer
yield
_lowercase = self.feature_extractor
_lowercase = False | 67 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
snake_case = False
snake_case = False
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Namespace ) -> Tuple:
return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
_lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
train_parser.add_argument(
'--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to saved the trained model.' )
train_parser.add_argument(
'--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
train_parser.add_argument(
'--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
_lowercase = logging.get_logger('transformers-cli/training' )
_lowercase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output ,exist_ok=__A )
_lowercase = args.output
_lowercase = args.column_label
_lowercase = args.column_text
_lowercase = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_lowercase = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_lowercase = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = args.validation_split
_lowercase = args.train_batch_size
_lowercase = args.valid_batch_size
_lowercase = args.learning_rate
_lowercase = args.adam_epsilon
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
raise NotImplementedError
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 67 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :str=False ) -> List[Any]:
_lowercase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_lowercase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :Optional[Any] , snake_case__ :List[str]=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
_lowercase = ''
else:
_lowercase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[
: config.hidden_size, :
]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] ) -> Tuple:
_lowercase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Tuple ) -> Optional[Any]:
_lowercase = dct.pop(snake_case__ )
_lowercase = val
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict , snake_case__ :Dict , snake_case__ :List[str]=True ) -> str:
_lowercase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_lowercase = 8
# set labels if required
if not base_model:
_lowercase = 1000
_lowercase = 'huggingface/label-files'
_lowercase = 'imagenet-1k-id2label.json'
_lowercase = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_lowercase = 384
_lowercase = 1536
_lowercase = 12
_lowercase = 6
# load original model from torch hub
_lowercase = torch.hub.load('facebookresearch/dino:main' , snake_case__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_lowercase = original_model.state_dict()
if base_model:
remove_classification_head_(snake_case__ )
_lowercase = create_rename_keys(snake_case__ , base_model=snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , snake_case__ )
# load HuggingFace model
if base_model:
_lowercase = ViTModel(snake_case__ , add_pooling_layer=snake_case__ ).eval()
else:
_lowercase = ViTForImageClassification(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by ViTImageProcessor
_lowercase = ViTImageProcessor()
_lowercase = image_processor(images=prepare_img() , return_tensors='pt' )
_lowercase = encoding['pixel_values']
_lowercase = model(snake_case__ )
if base_model:
_lowercase = original_model(snake_case__ )
assert torch.allclose(snake_case__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_lowercase = original_model(snake_case__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(snake_case__ , outputs.logits , atol=1E-3 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
snake_case = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model) | 67 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build a ``DPTConfig`` (hybrid ViT backbone) matching the original checkpoint.

    Args:
        checkpoint_url: URL or path of the original DPT checkpoint; substrings
            ("large", "nyu", "midas", "ade") select the architecture variant.

    Returns:
        Tuple ``(config, expected_shape)`` where ``expected_shape`` is the
        output shape used downstream as a sanity check.
    """
    config = DPTConfig(embedding_type='hybrid')

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # BUGFIX: the original condition was `if "nyu" or "midas" in checkpoint_url:`
    # which is always true because the literal "nyu" is truthy; test each
    # substring against the URL explicitly.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        # Fetch the ADE20K label mapping from the hub dataset of label files.
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        # Segmentation logits: (batch, num_labels, height, width).
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop the original classification-head weights from ``state_dict`` in place.

    These keys have no counterpart in the transformers model; missing keys are
    ignored (``pop`` with a ``None`` default).
    """
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """Translate one parameter name from the original DPT checkpoint layout
    to the transformers layout.

    The substitutions are order-sensitive: each ``if`` re-tests the partially
    rewritten ``name``, so earlier rewrites deliberately feed later ones
    (e.g. ``scratch`` -> ``neck`` before the ``refinenet`` remapping).

    Args:
        name: original state-dict key.

    Returns:
        The rewritten key.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model', 'dpt.encoder')
    if "pretrained.model" in name:
        name = name.replace('pretrained.model', 'dpt.embeddings')
    if "patch_embed" in name:
        name = name.replace('patch_embed', '')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'position_embeddings')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "proj" in name and "project" not in name:
        name = name.replace('proj', 'projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layer')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2', 'layernorm_after')
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv', 'head')
    if "scratch" in name:
        name = name.replace('scratch', 'neck')
    if "layer1_rn" in name:
        name = name.replace('layer1_rn', 'convs.0')
    if "layer2_rn" in name:
        name = name.replace('layer2_rn', 'convs.1')
    if "layer3_rn" in name:
        name = name.replace('layer3_rn', 'convs.2')
    if "layer4_rn" in name:
        name = name.replace('layer4_rn', 'convs.3')
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet') : len('neck.refinenet') + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""", F"""fusion_stage.layers.{abs(layer_idx-4)}""")
    if "out_conv" in name:
        name = name.replace('out_conv', 'projection')
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1', 'residual_layer1')
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2', 'residual_layer2')
    if "conv1" in name:
        name = name.replace('conv1', 'convolution1')
    if "conv2" in name:
        name = name.replace('conv2', 'convolution2')
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0', 'neck.reassemble_stage.readout_projects.0.0')
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0', 'neck.reassemble_stage.readout_projects.1.0')
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0', 'neck.reassemble_stage.readout_projects.2.0')
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0', 'neck.reassemble_stage.readout_projects.3.0')
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3', 'neck.reassemble_stage.layers.0.projection')
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4', 'neck.reassemble_stage.layers.0.resize')
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3', 'neck.reassemble_stage.layers.1.projection')
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4', 'neck.reassemble_stage.layers.1.resize')
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3', 'neck.reassemble_stage.layers.2.projection')
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3', 'neck.reassemble_stage.layers.3.projection')
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4', 'neck.reassemble_stage.layers.3.resize')
    if "pretrained" in name:
        name = name.replace('pretrained', 'dpt')
    if "bn" in name:
        name = name.replace('bn', 'batch_norm')
    if "head" in name:
        name = name.replace('head', 'head.head')
    if "encoder.norm" in name:
        name = name.replace('encoder.norm', 'layernorm')
    if "auxlayer" in name:
        name = name.replace('auxlayer', 'auxiliary_head.head')
    if "backbone" in name:
        name = name.replace('backbone', 'backbone.bit.encoder')
    if ".." in name:
        name = name.replace('..', '.')
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution', 'conv')
    if "layer" in name and "backbone" in name:
        name = name.replace('layer', 'layers')
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit', 'backbone.bit')
    if "embedder.conv" in name:
        name = name.replace('embedder.conv', 'embedder.convolution')
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm', 'backbone.bit.embedder.norm')
    return name
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value entries, in place.

    In timm the attention input projection is a single ``qkv`` matrix + bias
    stacked as [query; key; value] along dim 0; transformers expects three
    separate weight/bias pairs.

    Args:
        state_dict: checkpoint dict, mutated in place (already renamed to
            the ``dpt.encoder.layer.{i}`` layout).
        config: model config providing ``num_hidden_layers`` and ``hidden_size``.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download and return the standard COCO test image used to sanity-check
    the converted model (two cats on a couch, image id 39769)."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True lets PIL read straight from the HTTP response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Convert an original DPT-hybrid checkpoint to the transformers format.

    Loads the checkpoint, renames/splits its weights, runs a forward pass on a
    test image, and optionally shows the prediction, saves the model locally,
    and/or pushes it to the hub.

    Args:
        checkpoint_url: URL or local path of the original checkpoint.
        pytorch_dump_folder_path: output directory, or ``None`` to skip saving.
        push_to_hub: whether to push model + processor to the hub.
        model_name: hub model name (currently unused; the hub repo is hard-coded).
        show_prediction: whether to display the model output as an image.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model: "ade" checkpoints are segmentation heads,
    # everything else is depth estimation.
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')

    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        # Upsample back to the input image resolution for visualization.
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"""Saving model to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving image processor to {pytorch_dump_folder_path}""")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas')
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas')
if __name__ == "__main__":
    # CLI entry point: parse conversion options and run the converter.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
        type=str,
        help="""URL of the original DPT checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=False,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    parser.add_argument(
        """--model_name""",
        default="""dpt-large""",
        type=str,
        help="""Name of the model, in case you're pushing to the hub.""",
    )
    parser.add_argument(
        """--show_prediction""",
        action="""store_true""",
    )
    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 100 ) -> int:
_lowercase = n * (n + 1) * (2 * n + 1) / 6
_lowercase = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""") | 67 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the ViT-MAE model: submodules are only imported
# when their symbols are first accessed (or eagerly under TYPE_CHECKING).
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that resolves _import_structure on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
# TensorFlow is an optional dependency; only import it when present.
if is_tf_available():
    import tensorflow as tf

logger = logging.get_logger(__name__)
@dataclass
class A_ ( UpperCAmelCase ):
    """
    TensorFlow-specific benchmark arguments.

    Extends the framework-agnostic ``BenchmarkArguments`` with TPU/XLA
    options and translates the deprecated ``no_*`` flags into their
    positive counterparts.
    """

    # Legacy negated flags; each maps to the positive option obtained by
    # stripping the leading "no_" prefix (e.g. ``no_inference`` -> ``inference``).
    # NOTE: deliberately *not* annotated, so the dataclass machinery does not
    # turn this mutable list into a field.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` kwargs, then defer to the parent init."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        """Resolve the TPU cluster once; None when no TPU is reachable."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None  # no TPU found; fall back to CPU/GPU strategy
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        """Build the tf.distribute strategy matching the available hardware."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 1 |
def triangle_number_generator():
    """Yield triangle numbers 1, 3, 6, 10, ... (first one million of them)."""
    for n in range(1, 100_0000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of *n* via prime factorisation.

    For n = p1^a1 * p2^a2 * ... the divisor count is (a1+1)(a2+1)...
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining n is a prime factor with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution():
    """Project Euler 12: first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from public checkpoint names to their hosted config files.
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class A_ ( UpperCAmelCase ):
    """Configuration class for the Transformer-XL model.

    Stores the hyper-parameters of a Transformer-XL architecture; passing no
    arguments yields the configuration of the transfo-xl-wt103 checkpoint.
    """

    model_type = "transfo-xl"
    # "mems" is recurrent state, not a model output to compare at inference.
    keys_to_ignore_at_inference = ["mems"]
    # Translate the common transformers attribute names to Transformer-XL's own.
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            # Share all adaptive-softmax projections except the head cluster.
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Map from fairseq parameter-name fragments to their transformers counterparts;
# "*" is a placeholder for the encoder layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the attribute of *hf_pointer* addressed by the
    dotted *key* (and optional *weight_type* suffix), checking shapes first.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of the fairseq state dict into *hf_model*,
    using MAPPING to translate parameter names; unmatched names are logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # Fine-tuned checkpoints wrap the encoder under a "hubert." prefix.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor from a fairseq name
    like ``...conv_layers.<layer>.<type>...`` into the HF feature extractor.

    type_id 0 -> conv weight/bias; type_id 2 -> layer norm (only for the
    first layer when group norm is used, otherwise for every layer).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Convert a fairseq HuBERT checkpoint to the transformers format and save
    it (plus tokenizer/processor when fine-tuned) to *pytorch_dump_folder_path*.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-normed feature extractors were trained with attention masks.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=1_6000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the HuBERT checkpoint conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from public DPR checkpoint names to their hosted config files.
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class A_ ( UpperCAmelCase ):
    """Configuration for the DPR (Dense Passage Retrieval) encoders/reader.

    BERT-style hyper-parameters plus ``projection_dim`` for the optional
    projection on top of the pooled output (0 means no projection).
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds tiny ViT configs and inputs, and runs shape checks for the
    TF ViT model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Common-suite tests for the TF ViT models."""

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
    """Integration test running a real TF ViT checkpoint on a fixture image."""

    @cached_property
    def default_image_processor(self):
        # Only instantiate the processor when the vision stack is importable.
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one array cell: repeatedly swap with the left/right
    neighbor over pipes, then send the settled value back on *result_pipe*.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE: 10 iterations suffice only for lists of length <= 10.
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort *arr* in parallel with one process per element (odd-even
    transposition sort) and return the sorted list.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort the reversed list 10..1 and print before/after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Conv -> BatchNorm -> activation block with "same"-style padding."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # padding = kernel_size // 2 keeps the spatial size for stride 1.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max pool."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """Projection shortcut: 1x1 convolution + batch norm, used to match the
    residual branch's channel count and/or spatial stride."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """Classic ResNet residual block made of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # A projection shortcut is needed whenever channels or stride change.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class A_(nn.Module):
    """ResNet bottleneck layer: 1x1 reduce -> 3x3 -> 1x1 expand with a residual
    shortcut; the first 1x1 shrinks channels by `reduction` (default 4) so the
    3x3 convolution runs on a cheaper representation."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4) -> None:
        super().__init__()
        # BUG FIX: duplicate `__A` parameter names (a SyntaxError) and all
        # sub-modules discarded into a throwaway local; reconstructed.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        # Final conv carries no activation: it is applied after the residual add.
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def __UpperCAmelCase(self, hidden_state):
        """Residual forward pass: layer(x) + shortcut(x), then activation."""
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class A_(nn.Module):
    """A ResNet stage: `depth` identical residual layers, the first of which
    may downsample via its stride.

    NOTE(review): this file names every class `A_` while references use the
    upstream names (`ResNetBottleNeckLayer`, `ResNetBasicLayer`, ...); those
    names are kept as written but do not resolve in this file as-is.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2) -> None:
        super().__init__()
        # BUG FIX: duplicate `__A` parameter names (a SyntaxError) and the
        # Sequential discarded into a throwaway local while forward iterates
        # `self.layers`.
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        # downsampling is done in the first layer with stride of 2
        self.layers = nn.Sequential(
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def __UpperCAmelCase(self, input: Tensor) -> Tensor:
        """Run the input through every layer of the stage in order."""
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class A_(nn.Module):
    """Stack of ResNet stages producing the final hidden state and, when
    requested, every intermediate stage output."""

    def __init__(self, config: ResNetConfig) -> None:
        super().__init__()
        # BUG FIX: the ModuleList and the zipped channel pairs were bound to a
        # throwaway local, so `self.stages` and the loop iterable were undefined.
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def __UpperCAmelCase(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        """Run every stage; optionally collect each stage's input plus the final output."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class A_(UpperCAmelCase):
    """Pre-trained-model plumbing for ResNet: weight init and gradient checkpointing."""

    # NOTE(review): these four assignments all bind the same name, so only the
    # last survives — upstream these are distinct attributes (config class,
    # base model prefix, main input name, checkpointing flag); confirm intent.
    SCREAMING_SNAKE_CASE_ : int = ResNetConfig
    SCREAMING_SNAKE_CASE_ : Any = '''resnet'''
    SCREAMING_SNAKE_CASE_ : Tuple = '''pixel_values'''
    SCREAMING_SNAKE_CASE_ : str = True

    def __UpperCAmelCase(self, module) -> None:
        """Initialize one sub-module: He init for convs, unit weight / zero bias for norms."""
        # BUG FIX: the parameter was named `__A` while the body referenced the
        # undefined name `module`; `nn.Convad`/`nn.BatchNormad` do not exist in
        # torch.nn — the 2-D variants are intended.
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def __UpperCAmelCase(self, module, value=False) -> None:
        """Toggle gradient checkpointing on encoder modules."""
        # NOTE(review): this re-definition shadows the weight-init method above
        # (upstream the two methods have distinct names). The isinstance target
        # and the assignment were mangled; presumably this guards the ResNet
        # encoder and sets its `gradient_checkpointing` flag — TODO confirm.
        # BUG FIX: duplicate `__A` parameters were a SyntaxError, and the body
        # referenced the undefined name `value`.
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
snake_case = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
snake_case = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''' , UpperCAmelCase , )
class A_(UpperCAmelCase):
    """Bare ResNet model: embeddings -> encoder -> adaptive average pooling."""

    def __init__(self, config) -> None:
        super().__init__(config)
        # BUG FIX: the config and every sub-module were discarded into a
        # throwaway local, so the forward pass crashed on missing attributes;
        # `nn.AdaptiveAvgPoolad` does not exist — the 2-D variant is intended.
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    # BUG FIX: the decorator argument was the undefined module-level name `__A`;
    # `snake_case` is the inputs docstring defined above (hedged: upstream this
    # constant has its own name).
    @add_start_docstrings_to_model_forward(snake_case)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def __UpperCAmelCase(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        """Embed, encode and pool the pixel values; fall back to config flags for the options."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , UpperCAmelCase , )
class A_(UpperCAmelCase):
    """ResNet with a linear image-classification head on the pooled features."""

    def __init__(self, config) -> None:
        super().__init__(config)
        # BUG FIX: the label count, backbone and classifier head were bound to
        # a throwaway local, so `self.resnet`/`self.classifier`/`self.num_labels`
        # referenced by forward() did not exist.
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    # BUG FIX: decorator arguments were undefined `__A`s; the output type is
    # grounded by the forward return annotation.
    @add_start_docstrings_to_model_forward(snake_case)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def __UpperCAmelCase(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> ImageClassifierOutputWithNoAttention:
        """Run the backbone and classification head; compute a loss when `labels` is given."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer and cache the problem type once, following the standard
            # Transformers heuristic based on num_labels and the label dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    '''
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''' , UpperCAmelCase , )
class A_(UpperCAmelCase, UpperCAmelCase):
    """ResNet backbone exposing per-stage feature maps for detection/segmentation.

    NOTE(review): listing the same base class twice raises TypeError at class
    creation; upstream the second base is a distinct backbone mixin — confirm.
    """

    def __init__(self, config) -> None:
        super().__init__(config)
        super()._init_backbone(config)
        # BUG FIX: the feature sizes and sub-modules were discarded into a
        # throwaway local; `num_features` is the upstream attribute name for
        # this list (hedged — confirm against the backbone mixin contract).
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    # BUG FIX: decorator arguments were undefined `__A`s; the output type is
    # grounded by the forward return annotation.
    @add_start_docstrings_to_model_forward(snake_case)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def __UpperCAmelCase(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        """Return the feature maps of the stages listed in `self.out_features`."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        # Hidden states are always requested internally: the feature maps are
        # drawn from them by stage index.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module constants are bound to the same name `snake_case`,
# so the logger is immediately shadowed by the checkpoint map — upstream these
# are distinct names (a logger and an archive map); confirm intent.
snake_case = logging.get_logger(__name__)
# Map from canonical BigBird checkpoints to their hosted config.json files.
snake_case = {
    """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
    """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
    """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_(UpperCAmelCase):
    """Configuration for a BigBird model.

    Holds the architecture hyper-parameters; every constructor argument is
    persisted as a same-named attribute so downstream code can read it.
    """

    SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        # BUG FIX: every parameter was named `__A` (duplicate argument names
        # are a SyntaxError) and every value was bound to a throwaway local
        # instead of an instance attribute, so the config stored nothing.
        # Parameter names are reconstructed from the attribute assignments and
        # the token-id arguments forwarded to super().
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class A_(UpperCAmelCase):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCAmelCase(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis mapping for each exported model input."""
        # BUG FIX: the axis dict was bound to a throwaway local, leaving
        # `dynamic_axis` undefined in the OrderedDict below. Multiple-choice
        # tasks carry an extra `choice` dimension between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
import re


def SCREAMING_SNAKE_CASE__(snake_case__: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G).

    Raises:
        ValueError: if the strand contains any character other than A, T, C, G.
    """
    # Every character must match [ATCG]; otherwise findall drops it and the
    # length comparison fails.
    if len(re.findall('[ATCG]', snake_case__)) != len(snake_case__):
        raise ValueError('Invalid Strand')
    # BUG FIX: the return statement referenced the undefined name `dna`;
    # operate on the actual parameter instead.
    return snake_case__.translate(snake_case__.maketrans('ATCG', 'TAGC'))
# Run the module's embedded doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
def prefix_function(snake_case__: str) -> list:
    """Compute the Knuth-Morris-Pratt prefix function (failure table).

    prefix_result[i] is the length of the longest proper prefix of
    snake_case__[:i + 1] that is also a suffix of it.
    """
    # BUG FIX: the original bound `j` and every table update to a throwaway
    # local, so `j` was undefined and the table never filled. The definition
    # was also immediately shadowed by the wrapper below, which calls it as
    # `prefix_function` — hence the restored name.
    prefix_result = [0] * len(snake_case__)
    for i in range(1, len(snake_case__)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and snake_case__[i] != snake_case__[j]:
            j = prefix_result[j - 1]
        if snake_case__[i] == snake_case__[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def SCREAMING_SNAKE_CASE__(snake_case__: str) -> int:
    """Return the length of the longest prefix that reappears as a suffix of
    some prefix of the string.

    ROBUSTNESS: returns 0 for the empty string instead of letting `max` raise
    on an empty table.
    """
    return max(prefix_function(snake_case__), default=0)
# Run the module's embedded doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
# Surface conversion progress at INFO level.
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict on disk.

    BUG FIX: the original declared all three parameters as `snake_case__`
    (duplicate argument names are a SyntaxError) and was defined under a name
    that the __main__ block below never calls; the function and parameter
    names are reconstructed from that argparse call site.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
# Command-line entry point: parse checkpoint/config/output paths and convert.
if __name__ == "__main__":
    # BUG FIX: the parser and parsed namespace were bound to throwaway names
    # while the lines below reference `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
def SCREAMING_SNAKE_CASE__(snake_case__: list) -> int:
    """Return the minimum possible difference between the sums of two subsets
    that partition `snake_case__` (the minimum-partition problem).

    Runs in O(n * sum) time and space via subset-sum dynamic programming.
    Returns 0 for an empty input.
    """
    # BUG FIX: every dp update was bound to a throwaway local, so the table was
    # never populated and the result variable could be read before assignment;
    # the recurrence also consulted dp[i][j - 1] instead of dp[i - 1][j].
    n = len(snake_case__)
    s = sum(snake_case__)
    # dp[i][j] is True when some subset of the first i items sums to exactly j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always reaches sum 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip item i-1
            if snake_case__[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - snake_case__[i - 1]]  # take item i-1
    # The best split puts a subset sum as close to s/2 as possible.
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
    return s  # unreachable: dp[n][0] is always True
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def SCREAMING_SNAKE_CASE__(repo_id, path, revision):
    """Check that hf_hub_url builds the expected resolve URL, quoting the path
    and defaulting the revision to "main"."""
    # BUG FIX: the original declared all three parameters as `snake_case__`
    # (duplicate argument names are a SyntaxError); the names are grounded by
    # the parametrize decorators and the keyword arguments below.
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}"""
from manim import *
class A_ ( UpperCAmelCase ):
    """Manim scene: animates inference on a model too large for GPU memory,
    shuttling weights between CPU, GPU and disk one layer at a time.

    NOTE(review): nearly every statement below binds its result to the
    throwaway name `_lowercase`, while later lines reference names that are
    never assigned (`mem`, `model_arr`, `model_cpu_arr`, `model_base`,
    `cpu_left_col_base`, `gpu_rect`, `input`, `a`, `a_c`, `step_a`, ...), and
    many call arguments were reduced to the undefined `__A`. The original
    assignment targets appear to have been mangled, so this scene cannot run
    as written; the comments below describe the apparent intent only.
    """
    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Building-block rectangles: a memory cell, its fill, and the smaller
        # "meta" cell used for the disk representation.
        _lowercase = Rectangle(height=0.5 ,width=0.5 )
        _lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
        _lowercase = Rectangle(height=0.25 ,width=0.25 )
        # CPU: two six-cell columns plus a label, placed on the left.
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
        _lowercase = Text('CPU' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__A )
        # GPU: a single four-cell row plus a label.
        _lowercase = [mem.copy() for i in range(4 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = Text('GPU' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        gpu.move_to([-1, -1, 0] )
        self.add(__A )
        # Model: a six-cell row plus a label, on the right.
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = Text('Model' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        model.move_to([3, -1.0, 0] )
        self.add(__A )
        # Filled rectangles tracking each layer's weights: one set over the
        # model row, one mirrored over the CPU column.
        _lowercase = []
        _lowercase = []
        for i, rect in enumerate(__A ):
            _lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
            target.move_to(__A )
            model_arr.append(__A )
            _lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__A )
        self.add(*__A ,*__A )
        # Disk: two columns of the smaller meta cells plus a label.
        _lowercase = [meta_mem.copy() for i in range(6 )]
        _lowercase = [meta_mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
        _lowercase = Text('Disk' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        disk.move_to([-4, -1.25, 0] )
        self.add(__A ,__A )
        # Legend box in the top-left corner.
        _lowercase = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _lowercase = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
        key_text.move_to([-5, 2.4, 0] )
        self.add(__A ,__A )
        _lowercase = MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
        blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
        self.add(__A )
        # Step 1: introduce the input square and move it to the first layer.
        _lowercase = MarkupText(
            F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ) )
        _lowercase = Square(0.3 )
        input.set_fill(__A ,opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] ,__A ,buff=0.5 )
        self.play(Write(__A ) )
        input.generate_target()
        input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
        self.play(MoveToTarget(__A ) )
        self.play(FadeOut(__A ) )
        # Step 2: hooks move the first layer's weights CPU -> GPU.
        _lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
        a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        _lowercase = MarkupText(
            F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ,run_time=3 ) )
        _lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        _lowercase = a.copy()
        # Walk the input through all six layers, swapping each layer's weights
        # onto the GPU just in time and back to the CPU afterwards.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            _lowercase = AnimationGroup(
                FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
            self.play(__A )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                # Pre-stage the next layer's weights on the GPU.
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    _lowercase = 0.7
                self.play(
                    Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
            else:
                # Last layer: retire its weights and move the input off the model.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        # Wrap up: fade the arrows and announce completion.
        _lowercase = a_c
        _lowercase = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
        self.play(
            FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
        _lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
        self.wait()
import math
def sieve(snake_case__: int) -> list[int]:
    """Return all primes <= `snake_case__` using a segmented Sieve of Eratosthenes.

    BUG FIX: the original bound every composite marking and all bookkeeping to
    a throwaway local, so nothing was ever sieved, and the module-level call
    below invokes the undefined name `sieve` — hence the restored name (the
    old mangled name is kept as an alias for backward compatibility).
    """
    prime = []
    start = 2
    end = int(math.sqrt(snake_case__))  # primes up to sqrt(n) seed the segments
    temp = [True] * (end + 1)
    in_prime = []
    # Classic sieve on [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    # Sieve each segment [low, high] of width ~sqrt(n) with the seed primes.
    low = end + 1
    high = min(2 * end, snake_case__)
    while low <= snake_case__:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` at or after `low`.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, snake_case__)
    return prime


# Backward-compatible alias for the previous (mangled) public name.
SCREAMING_SNAKE_CASE__ = sieve
# NOTE(review): runs at import time and prints every prime up to 10**6
# (~78k values); consider guarding with `if __name__ == "__main__":`.
print(sieve(1_0**6))
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_:
    """Builds tiny configs and inputs for the TimmBackbone tests.

    BUG FIX: the constructor declared every parameter as `__A` (duplicate
    argument names are a SyntaxError), stored the values in a throwaway local
    instead of attributes, and all four helpers shared one mangled name so the
    later definitions shadowed the earlier ones. Parameter, attribute and
    method names are reconstructed from the call sites in this file
    (`self.prepare_config_and_inputs()`, `self.get_config()`,
    `self.model_tester.prepare_config_and_inputs_for_common()`).
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to the last stage when no explicit indices were requested.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) for one forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a TimmBackboneConfig from the tester's settings."""
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Instantiate the backbone and check the final feature-map shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Test suite for the TimmBackbone wrapper.

    NOTE(review): the base-class list repeats `UpperCAmelCase` three times
    (Python raises TypeError for duplicate bases), the six class attributes all
    bind the same name so only the last survives, and every method is named
    `__UpperCAmelCase`, so each definition shadows the previous one — upstream
    these are distinct mixins, attributes and `test_*` methods. The code below
    is kept as-is and annotated only.
    """
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    SCREAMING_SNAKE_CASE_ : List[Any] = False
    SCREAMING_SNAKE_CASE_ : Tuple = False
    SCREAMING_SNAKE_CASE_ : List[str] = False
    SCREAMING_SNAKE_CASE_ : Any = False
    # Set-up: build the model tester and a config tester (no text modality).
    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        _lowercase = TimmBackboneModelTester(self )
        _lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
    # Exercise the common config serialization round-trips.
    def __UpperCAmelCase ( self : int ) -> Tuple:
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # Compare a timm-backed backbone against the equivalent transformers one.
    def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
        _lowercase = 'resnet18'
        _lowercase = 'microsoft/resnet-18'
        _lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
        _lowercase = AutoBackbone.from_pretrained(__A )
        self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels ,transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices ,(-1,) )
        self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
        _lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
        _lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels ,transformers_model.channels )
    # The following skips disable common-mixin tests that do not apply to a
    # timm-managed backbone (no HF-side init, weights, or attentions).
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        pass
    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        pass
    @unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        pass
    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        pass
    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        pass
    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        pass
    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : int ) -> Any:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
        pass
    @unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def __UpperCAmelCase ( self : Any ) -> Any:
        pass
    @unittest.skip('Safetensors is not supported by timm.' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def __UpperCAmelCase ( self : int ) -> Optional[Any]:
        pass
    # Check the forward signature starts with `pixel_values`.
    def __UpperCAmelCase ( self : Dict ) -> int:
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(__A )
            _lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowercase = [*signature.parameters.keys()]
            _lowercase = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,__A )
    # Check gradients are retained on hidden states (and attentions if any).
    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase = True
        _lowercase = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        _lowercase = self.all_model_classes[0]
        _lowercase = model_class(__A )
        model.to(__A )
        _lowercase = self._prepare_for_class(__A ,__A )
        _lowercase = model(**__A )
        _lowercase = outputs[0][-1]
        # Encoder-/Decoder-only models
        _lowercase = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            _lowercase = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=__A )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    # Check feature maps/channels track out_indices, including the defaults
    # and the freshly initialized (non-pretrained) path.
    def __UpperCAmelCase ( self : List[str] ) -> int:
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(__A )
            model.to(__A )
            model.eval()
            _lowercase = model(**__A )
            self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
            self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            _lowercase = copy.deepcopy(__A )
            _lowercase = None
            _lowercase = model_class(__A )
            model.to(__A )
            model.eval()
            _lowercase = model(**__A )
            self.assertEqual(len(result.feature_maps ) ,1 )
            self.assertEqual(len(model.channels ) ,1 )
            # Check backbone can be initialized with fresh weights
            _lowercase = copy.deepcopy(__A )
            _lowercase = False
            _lowercase = model_class(__A )
            model.to(__A )
            model.eval()
            _lowercase = model(**__A )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants are bound to the same name `snake_case`,
# so the logger is immediately shadowed by the checkpoint map — upstream these
# are distinct names (a logger and an archive map); confirm intent.
snake_case = logging.get_logger(__name__)
# Map from canonical REALM checkpoints to their hosted config.json files.
snake_case = {
    """google/realm-cc-news-pretrained-embedder""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-encoder""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-scorer""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-openqa""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
    ),
    """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
    """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
    """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
    """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class A_(UpperCAmelCase):
    """Configuration for REALM models (embedder/encoder/scorer and the reader
    and retriever heads).

    Every constructor argument is persisted as a same-named attribute.
    """

    SCREAMING_SNAKE_CASE_ : int = '''realm'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=1335_3718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        # BUG FIX: every parameter was named `__A` (duplicate argument names
        # are a SyntaxError) and every value was bound to a throwaway local, so
        # the config stored nothing. Parameter names are reconstructed from the
        # attribute-assignment order and the positional default values.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
snake_case = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : bool = field(default=UpperCAmelCase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
SCREAMING_SNAKE_CASE_ : bool = field(
default=UpperCAmelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=UpperCAmelCase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[Union[str, Path, GenerationConfig]] = field(
default=UpperCAmelCase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
_lowercase = super().to_dict()
for k, v in d.items():
if isinstance(__A ,__A ):
_lowercase = v.to_dict()
return d | 67 |
snake_case = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(snake_case__ ) )
if __name__ == "__main__":
print(solution()) | 67 | 1 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[int] , snake_case__ :list[int] ) -> None:
_lowercase = len(snake_case__ )
print('The following activities are selected:' )
# The first activity is always selected
_lowercase = 0
print(snake_case__ , end=',' )
# Consider rest of the activities
for j in range(snake_case__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(snake_case__ , end=',' )
_lowercase = j
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case = [1, 3, 0, 5, 8, 5]
snake_case = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish) | 67 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
    """Return True if ``snake_case__`` is a prime number.

    Uses 6k +/- 1 trial division, so the check runs in O(sqrt(n)) divisions.
    Values below 2 (including negatives) are never prime.
    """
    # Fix: the original body referenced an undefined name ``number``; it must
    # use the actual parameter ``snake_case__``.
    if 1 < snake_case__ < 4:
        # 2 and 3 are primes
        return True
    elif snake_case__ < 2 or snake_case__ % 2 == 0 or snake_case__ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
        if snake_case__ % i == 0 or snake_case__ % (i + 2) == 0:
            return False
    return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> list[int]:
    """Return ``snake_case__`` followed by every left- and right-truncation.

    For 3797 the result is [3797, 797, 379, 97, 37, 7, 3]: truncations are
    produced pairwise, shortest last, alternating right-kept / left-kept.
    """
    # Fix: the original collapsed ``str_num`` and ``list_nums`` into one
    # variable and referenced undefined names (``n``, ``str_num``), and called
    # len() on the integer argument instead of its digit string.
    str_num = str(snake_case__ )
    list_nums = [snake_case__]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )  # drop i leading digits
        list_nums.append(int(str_num[:-i] ) )  # drop i trailing digits
    return list_nums
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
    """Cheap pre-filter for truncatable-prime candidates.

    For numbers with more than three digits, both the leading three digits and
    the trailing three digits must themselves form primes; shorter numbers
    always pass (the caller fully checks every truncation afterwards).
    """

    def _is_prime(n: int) -> bool:
        # 6k +/- 1 trial division; O(sqrt(n)).
        if 1 < n < 4:
            return True
        if n < 2 or n % 2 == 0 or n % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(n) + 1), 6):
            if n % i == 0 or n % (i + 2) == 0:
                return False
        return True

    # Fix: the original called a module-level ``is_prime`` that does not exist
    # under that name in this file (every def here is literally named
    # SCREAMING_SNAKE_CASE__), so the primality check is nested instead.
    digits = str(snake_case__ )
    if len(digits ) > 3:
        if not _is_prime(int(digits[-3:] ) ) or not _is_prime(int(digits[:3] ) ):
            return False
    return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 11 ) -> list[int]:
    """Return the first ``snake_case__`` two-sided truncatable primes (> 10).

    A truncatable prime stays prime while digits are repeatedly removed from
    either the left or the right (Project Euler problem 37), e.g. 3797.
    Single-digit primes are conventionally excluded, so the search starts at 13.
    """
    # Fix: the original body called module-level helpers (``validate``,
    # ``list_truncated_nums``, ``is_prime``) that do not exist under those
    # names in this file — every def here is named SCREAMING_SNAKE_CASE__ —
    # so the helpers are nested to keep the block self-contained.

    def _is_prime(n: int) -> bool:
        # 6k +/- 1 trial division; O(sqrt(n)).
        if 1 < n < 4:
            return True
        if n < 2 or n % 2 == 0 or n % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(n) + 1), 6):
            if n % i == 0 or n % (i + 2) == 0:
                return False
        return True

    def _truncations(n: int) -> list[int]:
        # The number itself plus every left- and right-truncation of it.
        digits = str(n)
        nums = [n]
        for i in range(1, len(digits)):
            nums.append(int(digits[i:]))
            nums.append(int(digits[:-i]))
        return nums

    def _validate(n: int) -> bool:
        # Cheap pre-filter: numbers longer than three digits must have prime
        # leading-three and trailing-three digit groups.
        digits = str(n)
        if len(digits) > 3:
            if not _is_prime(int(digits[-3:])) or not _is_prime(int(digits[:3])):
                return False
        return True

    list_truncated_primes: list[int] = []
    num = 13  # first candidate above the excluded single-digit primes
    while len(list_truncated_primes ) != snake_case__:
        if _validate(num) and all(_is_prime(i) for i in _truncations(num) ):
            list_truncated_primes.append(num)
        num += 2  # even numbers can never qualify
    return list_truncated_primes
def SCREAMING_SNAKE_CASE__ ( ) -> int:
    """Sum the eleven two-sided truncatable primes (Project Euler problem 37)."""
    # NOTE(review): ``compute_truncated_primes`` is not defined under that name
    # in this file — every function here is literally named
    # SCREAMING_SNAKE_CASE__ — so this call raises NameError at runtime.
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
    # NOTE(review): ``compute_truncated_primes`` is undefined in this file
    # (all defs are named SCREAMING_SNAKE_CASE__), so running the script as a
    # program raises NameError.
    print(F"""{sum(compute_truncated_primes(1_1)) = }""")
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
if torch.cuda.is_available():
_lowercase = torch.cuda.device_count()
else:
_lowercase = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main() | 67 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
    """Test helper that builds a tiny UMT5/T5 configuration plus dummy inputs
    and runs shared shape/consistency checks against the UMT5 model classes.

    NOTE(review): this file was mechanically renamed — every method is called
    ``__UpperCAmelCase``, every parameter ``__A`` (duplicate parameter names
    are a SyntaxError in Python), and every local ``_lowercase`` while later
    statements still reference the original names (``config``, ``input_dict``,
    ``outputs``, ...). The class cannot run in this form; comments below
    describe the intended behavior.
    """
    # Intended: store the test hyper-parameters (all tiny so tests stay fast).
    def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = encoder_seq_length
        _lowercase = decoder_seq_length
        # For common tests
        _lowercase = self.decoder_seq_length
        _lowercase = is_training
        _lowercase = use_attention_mask
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = d_ff
        _lowercase = relative_attention_num_buckets
        _lowercase = dropout_rate
        _lowercase = initializer_factor
        _lowercase = eos_token_id
        _lowercase = pad_token_id
        _lowercase = decoder_start_token_id
        _lowercase = None
        _lowercase = decoder_layers
    # Intended: fetch the reference large config from the Hub (network access).
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        return TaConfig.from_pretrained('google/umt5-base' )
    # Intended: fill in any missing attention/head masks with all-ones
    # defaults and bundle everything into the model-call kwargs dict.
    def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
        if attention_mask is None:
            _lowercase = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            _lowercase = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            _lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
        if decoder_head_mask is None:
            _lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        if cross_attn_head_mask is None:
            _lowercase = torch.ones(
                config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    # Intended: build random token ids (clamped above pad) plus a config.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        _lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
        _lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        _lowercase = input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = self.get_config()
        _lowercase = config.num_attention_heads
        _lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
        return config, input_dict
    # Intended: thin wrapper used by the common test mixin.
    def __UpperCAmelCase ( self : Dict ) -> str:
        _lowercase , _lowercase = self.prepare_config_and_inputs()
        return config, inputs_dict
    # Intended: pipeline-test config variant (small fixed vocab of 166).
    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        return TaConfig(
            vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    # Intended: standard tiny config built from the tester's hyper-parameters.
    def __UpperCAmelCase ( self : Dict ) -> Any:
        return TaConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    # Intended: forward the full encoder-decoder model and assert the output
    # shapes and the past-key-values structure.
    def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
        _lowercase = UMTaModel(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
        _lowercase = model(input_ids=__A ,decoder_input_ids=__A )
        _lowercase = result.last_hidden_state
        _lowercase = result.past_key_values
        _lowercase = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(__A ) ,config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) ,4 )
    # Intended: check that decoding with cached past_key_values matches a
    # full re-run on the concatenated sequence (last-token slice comparison).
    def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
        _lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
        # first forward pass
        _lowercase = model(__A ,use_cache=__A )
        _lowercase = model(__A )
        _lowercase = model(__A ,use_cache=__A )
        self.parent.assertTrue(len(__A ) == len(__A ) )
        self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
        _lowercase , _lowercase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
        # append to next input_ids and
        _lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
        _lowercase = model(__A )['last_hidden_state']
        _lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
        # select random slice
        _lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        _lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
        _lowercase = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
    # Intended: run a half-precision forward pass and assert no NaNs appear.
    def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
        _lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
        _lowercase = model(**__A )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """UMT5 model test suite wired into the common test mixins.

    NOTE(review): the original mixin base names were all replaced by the
    undefined name ``UpperCAmelCase``, methods by ``__UpperCAmelCase`` and
    locals by ``_lowercase`` (later statements still reference the original
    names, e.g. ``UMTaModelTester``, ``head_masking``, ``attention_names``),
    so this class cannot run in this form.
    """
    # Model classes exercised by the common tests.
    SCREAMING_SNAKE_CASE_ : List[str] = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping used by the pipeline test mixin.
    SCREAMING_SNAKE_CASE_ : str = (
        {
            '''conversational''': UMTaForConditionalGeneration,
            '''feature-extraction''': UMTaModel,
            '''summarization''': UMTaForConditionalGeneration,
            '''text2text-generation''': UMTaForConditionalGeneration,
            '''translation''': UMTaForConditionalGeneration,
            '''question-answering''': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    SCREAMING_SNAKE_CASE_ : int = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : List[Any] = True
    SCREAMING_SNAKE_CASE_ : int = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
    # setUp: instantiate the tester helper.
    # NOTE(review): ``UMTaModelTester`` is undefined — the helper class above
    # was renamed to ``A_``.
    def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
        _lowercase = UMTaModelTester(self )
    # Intended: export the model to ONNX (skipped on torch 1.8.0).
    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def __UpperCAmelCase ( self : int ) -> str:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                __A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
    # Intended: fp16 forward-pass smoke test (GPU only).
    @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
    def __UpperCAmelCase ( self : List[Any] ) -> str:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*__A )
    # Intended: generating with a fully-zeroed head mask must produce
    # all-zero attention weights for the corresponding attention type.
    # NOTE(review): ``head_masking`` / ``attention_names`` are undefined here —
    # both locals were renamed to ``_lowercase``.
    def __UpperCAmelCase ( self : List[str] ) -> int:
        _lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = config_and_inputs[0]
        _lowercase = UMTaForConditionalGeneration(__A ).eval()
        model.to(__A )
        _lowercase = {
            'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
        }
        for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
            _lowercase = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                _lowercase = torch.ones(
                    config.num_decoder_layers ,config.num_heads ,device=__A )
            _lowercase = model.generate(
                config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            _lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def __UpperCAmelCase ( self : str ) -> List[Any]:
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
    """Slow integration test against the real ``google/umt5-small`` checkpoint.

    NOTE(review): locals were renamed to ``_lowercase`` while later statements
    still use the original names (``input_ids`` at the generate call,
    ``torch.testing.assert_allclose(__A ,__A )`` with ``__A`` undefined in a
    no-argument method), so the test cannot run in this form.
    """
    # Intended flow: tokenize multilingual prompts, compare the token ids
    # against hard-coded expectations, generate, and compare the decoded text.
    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def __UpperCAmelCase ( self : int ) -> List[str]:
        _lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
        _lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
        _lowercase = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        _lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
        # fmt: off
        _lowercase = torch.tensor(
            [
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(__A ,__A )
        _lowercase = model.generate(input_ids.to(__A ) )
        _lowercase = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        _lowercase = tokenizer.batch_decode(__A )
        self.assertEqual(__A ,__A )
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[int] ) -> int:
    """Return the maximum sum of a subsequence of ``snake_case__`` that uses
    no two adjacent elements (the classic "house robber" recurrence).

    The empty selection is allowed, so the result is never negative.
    """
    # Fix: the original body referenced undefined names (``nums``,
    # ``max_excluding``) and collapsed both running totals into a single
    # ``_lowercase`` variable, so it raised NameError at runtime.
    if not snake_case__:
        return 0
    # Best achievable sum that includes / excludes the most recent element.
    max_including = snake_case__[0]
    max_excluding = 0
    for num in snake_case__[1:]:
        # Including ``num`` forces the previous element to be excluded;
        # excluding it keeps the better of the two previous states.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
# Import the SageMaker SDK helpers only when the optional dependency is
# installed; the whole test module below is skipped otherwise.
if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace
# Release-time SageMaker smoke test: only runs when TEST_SAGEMAKER=True, once
# per framework/script combination provided by @parameterized_class.
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue_model_parallelism.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
    ] )
class A_ ( unittest.TestCase ):
    """End-to-end test that launches a SageMaker model-parallel training job
    and asserts runtime/accuracy/loss thresholds.

    NOTE(review): methods were renamed to ``__UpperCAmelCase`` and several
    parameter names to ``__A`` while bodies still reference the originals
    (``instance_count``, ``name_extension``, ``job_name``), so those methods
    raise NameError as written.
    """
    # setUp: copy the training script into the test workspace (pytorch only).
    def __UpperCAmelCase ( self : Any ) -> Optional[int]:
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
        assert hasattr(self ,'env' )
    # Intended: build a HuggingFace estimator configured for smdistributed
    # model parallelism (8 processes/host, 4 pipeline partitions).
    def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
        # configuration for running training on smdistributed Model Parallel
        _lowercase = {
            'enabled': True,
            'processes_per_host': 8,
        }
        _lowercase = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        _lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        _lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        # NOTE(review): ``instance_count`` and ``name_extension`` below are
        # undefined — they were renamed to ``__A`` / ``_lowercase`` above.
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            } ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
    # Intended: dump a finished job's metrics to CSV for inspection.
    def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
        TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    # The actual test: train, collect metrics, assert the thresholds from the
    # parameterized ``results`` dict, and persist a JSON summary for the PR.
    @parameterized.expand([(1,)] )
    def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
        # create estimator
        _lowercase = self.create_estimator(__A )
        # run training
        estimator.fit()
        # result dataframe
        _lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _lowercase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A )
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
snake_case = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
snake_case = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
snake_case = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
# NOTE(review): ``_DESCRIPTION`` and ``_KWARGS_DESCRIPTION`` are undefined —
# the module constants above were all renamed to ``snake_case`` — so this
# decorator raises NameError at import time.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    """``datasets`` Metric wrapper around the official ``mauve-text`` package.

    NOTE(review): the compute method's signature repeats the parameter name
    ``__A`` (a SyntaxError in Python) and forwards ``__A`` for every keyword,
    so the class cannot run in this form; the original forwarded each named
    argument (p_text, q_text, num_buckets, ...) to ``compute_mauve``.
    """
    # Metric metadata: citation, homepage, expected string features.
    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'predictions': datasets.Value('string' ,id='sequence' ),
                    'references': datasets.Value('string' ,id='sequence' ),
                } ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ] ,)
    # Delegates the entire computation to ``mauve.compute_mauve``.
    def __UpperCAmelCase ( self : Optional[Any] ,__A : Optional[Any] ,__A : List[str] ,__A : List[str]=None ,__A : List[Any]=None ,__A : Dict=None ,__A : str=None ,__A : int="auto" ,__A : List[str]=-1 ,__A : Union[str, Any]=0.9 ,__A : Dict=5 ,__A : Optional[Any]=500 ,__A : int="gpt2-large" ,__A : List[Any]=-1 ,__A : List[Any]=1024 ,__A : Tuple=25 ,__A : Optional[Any]=5 ,__A : int=True ,__A : int=25 ,) -> List[str]:
        _lowercase = compute_mauve(
            p_text=__A ,q_text=__A ,p_features=__A ,q_features=__A ,p_tokens=__A ,q_tokens=__A ,num_buckets=__A ,pca_max_data=__A ,kmeans_explained_var=__A ,kmeans_num_redo=__A ,kmeans_max_iter=__A ,featurize_model_name=__A ,device_id=__A ,max_text_length=__A ,divergence_curve_discretization_size=__A ,mauve_scaling_factor=__A ,verbose=__A ,seed=__A ,)
        return out
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module-level logger (originally ``logger``).
# NOTE(review): both assignments below bind the same name ``snake_case``, so
# the logger is immediately overwritten by the checkpoint-URL map.
snake_case = logging.get_logger(__name__)
# Map of pretrained checkpoint names to their hosted config.json URLs.
snake_case = {
    """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
    """Configuration class for BlenderbotSmall encoder-decoder models.

    Stores vocabulary size, encoder/decoder depth and width, dropout rates and
    special-token ids, then forwards the token-id kwargs to the base config.

    NOTE(review): this file was mechanically renamed — the base class name
    ``UpperCAmelCase`` is undefined (originally ``PretrainedConfig``, which is
    imported above) and the ``__init__`` signature repeats the parameter name
    ``__A`` (a SyntaxError in Python) while forwarding ``__A`` for every token
    id in the ``super().__init__`` call, so the class cannot run as-is.
    """
    # Model-type key used by the Auto* factories.
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
    # Keys excluded when comparing/serializing configs at inference time.
    SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
    # Canonical attribute names mapped onto this config's naming scheme.
    SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
        # Vocabulary / sequence-length limits.
        _lowercase = vocab_size
        _lowercase = max_position_embeddings
        # Model width and encoder/decoder geometry.
        _lowercase = d_model
        _lowercase = encoder_ffn_dim
        _lowercase = encoder_layers
        _lowercase = encoder_attention_heads
        _lowercase = decoder_ffn_dim
        _lowercase = decoder_layers
        _lowercase = decoder_attention_heads
        # Regularization and activation settings.
        _lowercase = dropout
        _lowercase = attention_dropout
        _lowercase = activation_dropout
        _lowercase = activation_function
        _lowercase = init_std
        _lowercase = encoder_layerdrop
        _lowercase = decoder_layerdrop
        _lowercase = use_cache
        _lowercase = encoder_layers
        _lowercase = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
    @property
    def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        """ONNX export: describe the model's input tensors and dynamic axes.

        Branches on ``self.task``: seq2seq tasks add decoder inputs (plus past
        key/values when ``self.use_past``), causal-lm adds per-layer past
        tensors, and the default task lists both encoder and decoder inputs.

        NOTE(review): the original built and mutated an ``OrderedDict`` named
        ``common_inputs``; every assignment was renamed to ``_lowercase``, so
        the keyed writes are lost, ``__A`` in the causal-lm loop is undefined
        (a property takes no arguments) and the final ``return common_inputs``
        raises NameError.
        """
        if self.task in ["default", "seq2seq-lm"]:
            # Encoder inputs are always batched over axis 0 and sequence axis 1.
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                # With cached past, the decoder only receives the new tokens.
                _lowercase = {0: 'batch'}
                _lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                _lowercase = {0: 'batch', 1: 'decoder_sequence'}
                _lowercase = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                # Append the per-layer past key/value input specs.
                self.fill_with_past_key_values_(__A ,direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                _lowercase , _lowercase = self.num_layers
                for i in range(__A ):
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            # Default task: plain encoder + decoder inputs, no past.
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | 1 |
from __future__ import annotations
from typing import Any
class A_ :
    """Circular queue over a doubly-linked ring of pre-allocated nodes.

    A slot whose ``data`` is ``None`` is free; ``front`` points at the oldest
    element, ``rear`` at the newest.

    Bug fix: the obfuscation bound every value to a throwaway `_lowercase`
    local and renamed all methods to one shared name, while the call sites
    (`self.create_linked_list`, `self.is_empty`, `self.check_is_full`, ...)
    still used the real names restored below.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        """Pre-allocate `initial_capacity` empty nodes linked into a ring."""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        # Empty when front and rear coincide on a node that holds no data.
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        """Return the oldest element without removing it; raises when empty."""
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        """Store `data` in the next free slot; raises when the ring is full."""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        """Remove and return the oldest element; raises when empty."""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            # Single occupied slot: clear it in place.
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')
class Node:
    """Ring-buffer slot: a payload plus links to both neighbours.

    Bug fix: the obfuscation renamed this class to `A_` although the queue
    above instantiates `Node()`, and its attributes were bound to a throwaway
    local instead of `self`.
    """

    def __init__(self) -> None:
        self.data = None  # queue payload; None marks the slot as free
        self.next = None  # next node in the ring
        self.prev = None  # previous node in the ring
if __name__ == "__main__":
    # Run the doctests embedded in this module.  (Bug fix: the original last
    # line carried trailing dataset-table residue `| 67 |`, a syntax error.)
    import doctest

    doctest.testmod()
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast smoke tests for the Karras-VE unconditional image pipeline.

    Bug fix: restored from obfuscation — the property returned the unbound
    name `model`, locals were clobbered into `_lowercase`, arguments passed
    the out-of-scope name `__A`, and the test method's name did not start
    with `test`, so unittest discovery would never run it.
    """

    @property
    def dummy_uncond_unet(self):
        """A tiny, deterministically seeded 32x32 UNet for fast CPU tests."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        """Pipeline output must match the pinned slice, with and without return_dict."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        # NOTE(review): the obfuscated original passed `__A` to `pipe.to` and
        # `set_progress_bar_config`; `torch_device` / `None` are the
        # conventional diffusers-test arguments — confirm.
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test: 256x256 NCSN++ CelebA-HQ Karras-VE sampling.

    Bug fix: restored from obfuscation — locals were clobbered into
    `_lowercase`, arguments passed the out-of-scope name `__A`, the method
    name was not `test`-prefixed, and the last line carried dataset-table
    residue (a syntax error).
    """

    def test_full_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        # NOTE(review): `torch_device` / `None` assumed for the two calls
        # below (the original passed the out-of-scope `__A`) — confirm.
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
snake_case = logging.get_logger(__name__)
class A_ ( PretrainedConfig ):
    """Composite configuration pairing a vision encoder config with a text
    decoder config.

    Bug fix: restored from obfuscation — `self.encoder` / `self.decoder` are
    read in `to_dict` but were never assigned (values went to a throwaway
    `_lowercase` local), `self.model_type` is read in `__init__` but the
    class attribute carried a placeholder name, all three methods shared one
    name, and the undefined `UpperCAmelCase` base is replaced with the
    imported `PretrainedConfig`.
    """

    model_type = '''vision-encoder-decoder'''
    is_composition = True

    def __init__(self, **kwargs):
        """Requires `encoder` and `decoder` sub-config dicts in `kwargs`."""
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"""A configuraton of type {self.model_type} cannot be instantiated because """
                F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"""
            )
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')
        # Re-hydrate the sub-configs through the auto-config factory.
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Build a composite config from two sub-configs, marking the decoder
        as a cross-attending decoder."""
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, recursing into the encoder/decoder sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """ONNX export configuration for the vision-encoder half.

    Bug fix: restored from obfuscation — this class is instantiated elsewhere
    in the file as `VisionEncoderDecoderEncoderOnnxConfig`, its three
    properties all shared one placeholder name, the torch minimum-version
    attribute carried a placeholder name, and the undefined `UpperCAmelCase`
    base is replaced with the imported `OnnxConfig`.
    """

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating exported outputs against PyTorch.
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """ONNX export configuration for the text-decoder half.

    Bug fix: restored from obfuscation — this class is instantiated elsewhere
    in the file as `VisionEncoderDecoderDecoderOnnxConfig`, values were bound
    to a throwaway `_lowercase` local while `common_inputs` was returned
    undefined, and the undefined `UpperCAmelCase` base is replaced with the
    imported `OnnxConfig`.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        # Hidden states produced by the vision encoder, fed via cross-attention.
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        """Build decoder dummy inputs plus a zeroed encoder_hidden_states
        tensor shaped from the config's `encoder_hidden_size`."""
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['input_ids'] = dummy_input.pop('input_ids')
        common_inputs['attention_mask'] = dummy_input.pop('attention_mask')
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class A_ ( OnnxConfig ):
    """Dispatcher ONNX config for the composite vision-encoder-decoder model:
    hands out the per-half configs defined above.

    Bug fix: restored from obfuscation — both methods shared one placeholder
    name, the decoder's `encoder_hidden_size` was assigned to a throwaway
    local instead of the config, the last line carried dataset-table residue,
    and the undefined `UpperCAmelCase` base is replaced with the imported
    `OnnxConfig`.
    """

    @property
    def inputs(self) -> None:
        # The composite model is never exported directly; only its halves are.
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        """Expose the encoder's hidden size to the decoder config, then wrap it."""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` at which `pattern` starts.

    Naive O(len(s) * len(pattern)) scan: slide a window over `s` and compare
    character by character.

    Bug fix: restored from obfuscation — the function was renamed away from
    `naive_pattern_search` (which the __main__ block below calls), the
    pattern length and the match flag were bound to a throwaway `_lowercase`
    local, and the argument was appended instead of the match position.
    """
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
    # Sanity check plus demo run.  (Bug fix: the print line carried trailing
    # dataset-table residue `| 67 | 1 |`, a syntax error.)
    assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
    print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
class Graph:
    """Undirected weighted graph stored as a nested adjacency mapping, with a
    union-find helper and Borůvka's minimum-spanning-tree algorithm.

    Bug fix: restored from obfuscation — the code references `Graph`,
    `Graph.UnionFind` and `Graph.build` by these names while the classes were
    renamed `A_` (and `UnionFind` flattened to top level), every method
    shared one placeholder name, `build` repeated the parameter `__A`
    (a SyntaxError), values were bound to a throwaway `_lowercase` local,
    and the last line carried dataset-table residue.
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register `vertex` (no-op if already present)."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add the undirected edge head<->tail; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate weights so every edge weight is distinct (removes
        ties before running Borůvka's algorithm)."""
        edges = self.get_edges()
        # Drop the reverse duplicate of each undirected edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n')

    def get_edges(self):
        """Return all (tail, head, weight) triples; each undirected edge
        appears twice, once per direction."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Construct a Graph from iterables of vertices and
        (head, tail, weight) edge triples."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            # Lazily create unknown items; compress paths on the way up.
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge the sets containing item1 and item2; return the root."""
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return a new Graph holding a minimum spanning tree of `graph`.

        Edge weights should be distinct (see `distinct_weight`) so that the
        cheapest-edge choice per component is unambiguous.
        """
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            # Drop the reverse duplicate of each undirected edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            # Record the cheapest outgoing edge for each component root.
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            # Merge components along their cheapest edges.
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff `matrix` equals its own conjugate transpose.

    Bug fix: restored from obfuscation — the parameter was named
    `snake_case__` while the body referenced the unbound name `matrix`, and
    the function was renamed away from `is_hermitian`, which `tests()` below
    calls.
    """
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray):
    """Return the Rayleigh quotient (v* A v) / (v* v) of matrix `a` and
    column vector `v`.

    Bug fix: restored from obfuscation — parameters were named
    `snake_case__` while the body referenced `v`/`a` inconsistently, and the
    function was renamed away from `rayleigh_quotient`, which `tests()`
    below calls.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    """Exercise `is_hermitian` and `rayleigh_quotient` on two sample matrices.

    Bug fix: restored from obfuscation — locals were bound to a throwaway
    `_lowercase` name and then passed as `snake_case__`, and the function was
    renamed away from `tests`, which the __main__ block below calls.
    """
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), F"""{a} is not hermitian."""
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), F"""{a} is not hermitian."""
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: this call carried trailing dataset-table residue `| 67 | 1 |`
    # (a syntax error).
    tests()
from PIL import Image
def mean_threshold(image: "Image") -> "Image":
    """Binarize a greyscale PIL image in place around its mean intensity:
    pixels above the mean become 255, the rest 0; the mutated image is
    returned.

    Bug fix: restored from obfuscation — every value landed in a throwaway
    `_lowercase` local and the function was renamed away from
    `mean_threshold`, which the __main__ block below calls.
    NOTE(review): `image.size` is unpacked as ``height, width`` although PIL
    reports ``(width, height)``, and the two passes index ``pixels[j, i]``
    vs ``pixels[i, j]`` — behavior preserved as-is (self-consistent for
    square images); confirm intent for non-square input.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    # First pass: accumulate the mean intensity over all pixels.
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # Bug fix: the result was bound to `snake_case` while the next line used
    # `image`, and the save line carried trailing dataset-table residue
    # (a syntax error).
    image = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
    image.save("""output_image_path""")
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Helper that builds `DetaImageProcessor` kwargs and computes the
    post-resize (height, width) the processor is expected to produce.

    Bug fix: restored from obfuscation — the class was renamed to `A_`
    although the test class below instantiates `DetaImageProcessingTester(self)`,
    every attribute was bound to a throwaway `_lowercase` local, and both
    methods shared one placeholder name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a DetaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should output.

        Single image: shortest-edge resizing keeping aspect ratio.  Batch:
        the max height/width over the per-image expectations (padding).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Test suite for `DetaImageProcessor`: construction, resize/pad shape
    expectations, and pinned COCO detection/panoptic encodings.

    Bug fix: restored from obfuscation — the class attribute the mixin reads
    was renamed away from `image_processing_class` (though `self.image_processing_class`
    is used below), no method name started with `test` so unittest discovery
    would never collect them, locals were clobbered into `_lowercase` with
    out-of-scope `__A` arguments, and the undefined `UpperCAmelCase` base is
    replaced with the imported `ImageProcessingSavingTestMixin`.  Trailing
    dataset-table residue on the last line was removed.
    """

    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 3_9769, 'annotations': target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_9769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetaImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([3_9769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 82_2873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Project Euler 43 predicate: for the digit tuple `num` = d1..d10,
    check that each 3-digit substring d(n)d(n+1)d(n+2) for n = 2..8 is
    divisible by the n-th prime (2, 3, 5, 7, 11, 13, 17).

    Bug fix: restored from obfuscation — the parameter was named
    `snake_case__` while the body referenced the unbound name `num`, and the
    function was renamed away from `is_substring_divisible`, which
    `solution()` below calls.
    """
    # d2d3d4 divisible by 2  <=>  d4 is even.
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3  <=>  digit sum d3+d4+d5 divisible by 3.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5  <=>  d6 is 0 or 5.
    if num[5] % 5 != 0:
        return False
    # Remaining substrings checked directly against 7, 11, 13, 17.
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """Sum all 0..(n-1) pandigital numbers with the Project Euler 43
    substring-divisibility property.

    Only meaningful for n == 10 (the predicate indexes digits 3..9).

    Bug fix: restored from obfuscation — `map` and `permutations` were called
    with the placeholder `snake_case__` instead of `str` / `num` / `n`, and
    the function was renamed away from `solution`, which the __main__ block
    below calls.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
    # Bug fix: the print line carried trailing dataset-table residue `| 67 |`
    # (a syntax error).
    print(F"""{solution() = }""")
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# Fail fast at import time: the training CLI needs at least one DL backend.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
# NOTE(review): both flags are bound to the same name, so the first assignment
# is dead — presumably these were two distinct module-level flags; confirm.
snake_case = False
snake_case = False
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Namespace ) -> Tuple:
    """Factory wired into argparse's ``set_defaults(func=...)``: build the
    training command object from the parsed CLI arguments.

    NOTE(review): the annotation says ``Tuple`` but a command instance is
    returned, and ``TrainCommand`` is not defined under that name in this
    module (the class below is named ``A_``) — confirm intended references.
    """
    return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
    """Transformers CLI ``train`` subcommand: fine-tune a model on a CSV text
    classification dataset.

    NOTE(review): several obfuscation artifacts make this class non-runnable as
    written — confirm each against the upstream implementation:
    * values are bound to the throwaway local ``_lowercase`` while later code
      reads attributes (``self.logger``, ``self.framework``, ``self.output``,
      ``self.pipeline``, ``self.train_dataset``, ...) that are never assigned;
    * ``parser`` and ``args`` are read although the parameters are named
      ``__A``;
    * every method shares the name ``__UpperCAmelCase``, so earlier
      definitions are shadowed by later ones;
    * typing names used in annotations are not imported in this module.
    """

    @staticmethod
    def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
        """Register the ``train`` subparser and all of its CLI options."""
        _lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
        train_parser.add_argument(
            '--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
        train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
        train_parser.add_argument(
            '--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=__A )

    def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
        """Resolve the framework, build the task pipeline and load datasets
        from the parsed CLI arguments."""
        _lowercase = logging.get_logger('transformers-cli/training' )
        _lowercase = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output ,exist_ok=__A )
        _lowercase = args.output
        _lowercase = args.column_label
        _lowercase = args.column_text
        _lowercase = args.column_id
        self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
        # Only text classification is implemented; the other tasks are stubs.
        if args.task == "text_classification":
            _lowercase = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F"""Loading dataset from {args.train_data}""" )
        _lowercase = Processor.create_from_csv(
            args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
        _lowercase = None
        if args.validation_data:
            self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
            _lowercase = Processor.create_from_csv(
                args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
        _lowercase = args.validation_split
        _lowercase = args.train_batch_size
        _lowercase = args.valid_batch_size
        _lowercase = args.learning_rate
        _lowercase = args.adam_epsilon

    def __UpperCAmelCase ( self : Optional[Any] ) -> str:
        """Dispatch training to the detected framework."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
        # PyTorch training path is not implemented in this CLI.
        raise NotImplementedError

    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
        """Fit the pipeline on the loaded datasets, then save it."""
        self.pipeline.fit(
            self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A_ ( unittest.TestCase ):
    """Unit tests for ``GenerationConfig``: save/load round-trips, construction
    from a model config, ``update()`` semantics and kwargs handling.

    NOTE(review): results are bound to the throwaway local ``_lowercase`` while
    later lines read names (``config``, ``loaded_config``, ``generation_config``,
    ...) that are never assigned, all methods share the name
    ``__UpperCAmelCase`` (earlier ones are shadowed), and typing names used in
    annotations are not imported — the tests cannot run as written; confirm
    against the upstream test module.
    """

    @parameterized.expand([(None,), ('foo.json',)] )
    def __UpperCAmelCase ( self : Any ,__A : Any ) -> Optional[Any]:
        """Round-trip a config through save_pretrained/from_pretrained,
        optionally under a custom config file name."""
        _lowercase = GenerationConfig(
            do_sample=__A ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(__A ,config_name=__A )
            _lowercase = GenerationConfig.from_pretrained(__A ,config_name=__A )
            # Checks parameters that were specified
            self.assertEqual(loaded_config.do_sample ,__A )
            self.assertEqual(loaded_config.temperature ,0.7 )
            self.assertEqual(loaded_config.length_penalty ,1.0 )
            self.assertEqual(loaded_config.bad_words_ids ,[[1, 2, 3], [4, 5]] )
            # Checks parameters that were not specified (defaults)
            self.assertEqual(loaded_config.top_k ,50 )
            self.assertEqual(loaded_config.max_length ,20 )
            self.assertEqual(loaded_config.max_time ,__A )

    def __UpperCAmelCase ( self : int ) -> List[str]:
        """A generation config built from a model config picks up that model's
        non-default generation parameters (e.g. eos_token_id)."""
        _lowercase = AutoConfig.from_pretrained('gpt2' )
        _lowercase = GenerationConfig.from_model_config(__A )
        _lowercase = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(__A ,__A )
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id ,default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id ,model_config.eos_token_id )

    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        """``update()`` mutates known attributes, leaves its argument intact,
        and returns the unused kwargs."""
        _lowercase = GenerationConfig()
        _lowercase = {
            'max_new_tokens': 1024,
            'foo': 'bar',
        }
        _lowercase = copy.deepcopy(__A )
        _lowercase = generation_config.update(**__A )
        # update_kwargs was not modified (no side effects)
        self.assertEqual(__A ,__A )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens ,1024 )
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(__A ,{'foo': 'bar'} )

    def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
        """Ad-hoc attributes survive save/load but are not initialized when
        building from a model config."""
        _lowercase = GenerationConfig()
        _lowercase = 'bar'
        with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
            generation_config.save_pretrained(__A )
            _lowercase = GenerationConfig.from_pretrained(__A )
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo ,'bar' )
        _lowercase = GenerationConfig.from_model_config(__A )
        assert not hasattr(__A ,'foo' )  # no new kwargs should be initialized if from config

    def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        """Defaults, explicit constructor values, and from_pretrained kwarg
        overrides behave as documented."""
        _lowercase = GenerationConfig()
        self.assertEqual(default_config.temperature ,1.0 )
        self.assertEqual(default_config.do_sample ,__A )
        self.assertEqual(default_config.num_beams ,1 )
        _lowercase = GenerationConfig(
            do_sample=__A ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
        self.assertEqual(config.temperature ,0.7 )
        self.assertEqual(config.do_sample ,__A )
        self.assertEqual(config.num_beams ,1 )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(__A )
            _lowercase = GenerationConfig.from_pretrained(__A ,temperature=1.0 )
            self.assertEqual(loaded_config.temperature ,1.0 )
            self.assertEqual(loaded_config.do_sample ,__A )
            self.assertEqual(loaded_config.num_beams ,1 )  # default value
@is_staging_test
class A_ ( unittest.TestCase ):
    """Hub-integration tests for ``GenerationConfig``: push to a user repo and
    to an org repo, both via ``push_to_hub`` and via ``save_pretrained``.

    NOTE(review): same obfuscation artifacts as the class above — results bound
    to ``_lowercase`` but read under their original names, and shared method
    names — confirm against the upstream test module. Requires staging-Hub
    credentials (``TOKEN``/``USER``); network access at runtime.
    """

    @classmethod
    def __UpperCAmelCase ( cls : Optional[Any] ) -> Optional[int]:
        """Authenticate against the staging Hub once for the class."""
        _lowercase = TOKEN
        HfFolder.save_token(__A )

    @classmethod
    def __UpperCAmelCase ( cls : List[str] ) -> Union[str, Any]:
        """Best-effort cleanup of the repos the tests create."""
        try:
            delete_repo(token=cls._token ,repo_id='test-generation-config' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id='valid_org/test-generation-config-org' )
        except HTTPError:
            pass

    def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
        """Push to a user repo and verify a faithful round-trip."""
        _lowercase = GenerationConfig(
            do_sample=__A ,temperature=0.7 ,length_penalty=1.0 ,)
        config.push_to_hub('test-generation-config' ,use_auth_token=self._token )
        _lowercase = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__A ,getattr(__A ,__A ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='test-generation-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                __A ,repo_id='test-generation-config' ,push_to_hub=__A ,use_auth_token=self._token )
        _lowercase = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__A ,getattr(__A ,__A ) )

    def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        """Push to an organization repo and verify a faithful round-trip."""
        _lowercase = GenerationConfig(
            do_sample=__A ,temperature=0.7 ,length_penalty=1.0 ,)
        config.push_to_hub('valid_org/test-generation-config-org' ,use_auth_token=self._token )
        _lowercase = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__A ,getattr(__A ,__A ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='valid_org/test-generation-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                __A ,repo_id='valid_org/test-generation-config-org' ,push_to_hub=__A ,use_auth_token=self._token )
        _lowercase = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__A ,getattr(__A ,__A ) )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
    """Build the DPT (hybrid ViT backbone) configuration matching a checkpoint
    URL, plus the expected output shape used for verification.

    NOTE(review): several issues to confirm against the upstream conversion
    script:
    * ``if "nyu" or "midas" in checkpoint_url:`` is ALWAYS true — ``"nyu"`` is
      a non-empty (truthy) string, so precedence makes this
      ``("nyu") or ("midas" in checkpoint_url)``; the intent was presumably
      ``if "nyu" in checkpoint_url or "midas" in checkpoint_url:``.
    * values are bound to the throwaway local ``_lowercase``, yet ``idalabel``,
      ``config`` and ``expected_shape`` are later read without being assigned.
    * ``checkpoint_url`` is read but the parameter is named ``snake_case__``.
    * typing names used in annotations are not imported in this module.
    """
    _lowercase = DPTConfig(embedding_type='hybrid' )
    if "large" in checkpoint_url:
        _lowercase = 1024
        _lowercase = 4096
        _lowercase = 24
        _lowercase = 16
        _lowercase = [5, 11, 17, 23]
        _lowercase = [256, 512, 1024, 1024]
        _lowercase = (1, 384, 384)
    if "nyu" or "midas" in checkpoint_url:
        _lowercase = 768
        _lowercase = [1, 1, 1, 0.5]
        _lowercase = [256, 512, 768, 768]
        _lowercase = 150
        _lowercase = 16
        _lowercase = (1, 384, 384)
        _lowercase = False
        _lowercase = 'project'
    if "ade" in checkpoint_url:
        _lowercase = True
        _lowercase = 768
        _lowercase = [1, 1, 1, 0.5]
        _lowercase = 150
        _lowercase = 16
        # ADE20K semantic-segmentation label map fetched from the Hub dataset.
        _lowercase = 'huggingface/label-files'
        _lowercase = 'ade20k-id2label.json'
        _lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
        _lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
        _lowercase = idalabel
        _lowercase = {v: k for k, v in idalabel.items()}
        _lowercase = [1, 150, 480, 480]
    return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :dict ) -> None:
    """Drop the classifier-head weights that the converted DPT model does not
    use from the checkpoint ``snake_case__`` (a state-dict mapping), in place.

    Fixes: the body referenced an undefined name ``state_dict`` and called
    ``pop`` with the wrong arguments; keys absent from the dict are now
    ignored via ``pop``'s default instead of raising KeyError.
    """
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        snake_case__.pop(k , None )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
    """Map one key of the original DPT/MiDaS checkpoint to the corresponding
    parameter name in the transformers DPT implementation.

    Fixes: the body referenced an undefined ``name`` (the parameter was never
    bound) and discarded every ``str.replace`` result into a throwaway local;
    each rewrite is now folded back into ``name``, which is returned. The
    undefined annotations (``List``/``Any``) were replaced with ``str``.
    """
    name = snake_case__
    # ViT backbone blocks go under dpt.encoder; embedding-level tensors
    # (cls_token / pos_embed / patch_embed) go under dpt.embeddings.
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , '' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt' )
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm' )
    if "head" in name:
        name = name.replace('head' , 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head' )
    if "backbone" in name:
        name = name.replace('backbone' , 'backbone.bit.encoder' )
    if ".." in name:
        name = name.replace('..' , '.' )
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution' , 'conv' )
    if "layer" in name and "backbone" in name:
        name = name.replace('layer' , 'layers' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
    if "embedder.conv" in name:
        name = name.replace('embedder.conv' , 'embedder.convolution' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
    return name
def SCREAMING_SNAKE_CASE__ ( state_dict : dict , config ) -> None:
    """Split each fused timm ``qkv`` projection into the separate
    query/key/value tensors expected by the transformers DPT model, in place.

    Fixes: the original signature declared the same parameter name twice (a
    SyntaxError) and discarded every split tensor into a throwaway local
    instead of writing it back into ``state_dict``. Target key names follow
    the transformers DPT/ViT layout
    (``dpt.encoder.layer.{i}.attention.attention.{query,key,value}``) —
    verify against the upstream conversion script.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
    """Download the standard COCO cats test image used to sanity-check the
    converted model. Requires network access.

    NOTE(review): ``snake_case__`` is undefined in this zero-argument function
    (the URL above is presumably what should be fetched, with ``stream=True``),
    and ``im`` is never assigned before being returned — confirm against the
    upstream script. ``Tuple`` is also not imported in this module.
    """
    _lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    _lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
    """End-to-end DPT checkpoint conversion: load the original state dict,
    rename/split its keys, load it into a transformers model, sanity-check on
    a test image, then optionally save locally and/or push to the Hub.

    NOTE(review): this function cannot run as written — all five parameters
    share the name ``snake_case__`` (a SyntaxError), and results are bound to
    the throwaway local ``_lowercase`` while later lines read never-assigned
    names (``config``, ``state_dict``, ``model``, ``image_processor``,
    ``image``, ``outputs``, ``prediction``, ``checkpoint_url``,
    ``pytorch_dump_folder_path``, ``show_prediction``). Typing names used in
    annotations are also not imported. Confirm each against the upstream
    conversion script.
    """
    _lowercase , _lowercase = get_dpt_config(snake_case__ )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    _lowercase = torch.load(snake_case__ , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(snake_case__ )
    # rename keys
    for key in state_dict.copy().keys():
        _lowercase = state_dict.pop(snake_case__ )
        _lowercase = val
    # read in qkv matrices
    read_in_q_k_v(snake_case__ , snake_case__ )
    # load HuggingFace model
    _lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
    model.load_state_dict(snake_case__ )
    model.eval()
    # Check outputs on an image
    _lowercase = 480 if 'ade' in checkpoint_url else 384
    _lowercase = DPTImageProcessor(size=snake_case__ )
    _lowercase = prepare_img()
    _lowercase = image_processor(snake_case__ , return_tensors='pt' )
    # forward pass
    _lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
    if show_prediction:
        _lowercase = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(snake_case__ )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(snake_case__ )
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
# CLI entry point: parse conversion options and run the converter.
if __name__ == "__main__":
    snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
        type=str,
        help="""URL of the original DPT checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=False,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    parser.add_argument(
        """--model_name""",
        default="""dpt-large""",
        type=str,
        help="""Name of the model, in case you're pushing to the hub.""",
    )
    parser.add_argument(
        """--show_prediction""",
        action="""store_true""",
    )
    # NOTE(review): the parser is assigned to `snake_case` but used as `parser`,
    # `args`, and `convert_dpt_checkpoint` — none of which are defined under
    # those names in this module; confirm against the upstream script.
    snake_case = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""")
snake_case = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
snake_case = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class A_ ( UpperCAmelCase , unittest.TestCase ):
    """Tokenizer tests for CamemBERT: special tokens, vocab, slow/fast
    (SentencePiece vs. tokenizers-backed) parity, and a slow integration check
    against pinned expected encodings.

    NOTE(review): obfuscation artifacts to confirm against the upstream test
    module — the four class attributes all share the name
    ``SCREAMING_SNAKE_CASE_`` (only the last survives; upstream these are
    ``tokenizer_class`` / ``rust_tokenizer_class`` / ``test_rust_tokenizer`` /
    ``test_sentencepiece``), results are bound to the throwaway local
    ``_lowercase`` while later lines read never-assigned names (``tokenizer``,
    ``rust_tokenizer``, ...), ``__A`` is read in methods that have no such
    parameter (presumably the ``SAMPLE_VOCAB`` fixture), and typing names used
    in annotations are not imported.
    """

    SCREAMING_SNAKE_CASE_ : Dict = CamembertTokenizer
    SCREAMING_SNAKE_CASE_ : List[Any] = CamembertTokenizerFast
    SCREAMING_SNAKE_CASE_ : List[str] = True
    SCREAMING_SNAKE_CASE_ : int = True

    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        """Build a tokenizer from the SentencePiece fixture and persist it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        _lowercase = CamembertTokenizer(__A )
        tokenizer.save_pretrained(self.tmpdirname )

    def __UpperCAmelCase ( self : Tuple ) -> Tuple:
        """<pad> maps to id 1 in both directions."""
        _lowercase = '<pad>'
        _lowercase = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) ,__A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) ,__A )

    def __UpperCAmelCase ( self : Tuple ) -> Dict:
        """Vocab ordering and size match the fixture."""
        _lowercase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] ,'<s>NOTUSED' )
        self.assertEqual(vocab_keys[1] ,'<pad>' )
        self.assertEqual(vocab_keys[-1] ,'<mask>' )
        self.assertEqual(len(__A ) ,1004 )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        self.assertEqual(self.get_tokenizer().vocab_size ,1005 )

    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        """Slow (SentencePiece) and fast (BPE) tokenizers agree on ids."""
        _lowercase = CamembertTokenizer(__A )
        tokenizer.save_pretrained(self.tmpdirname )
        _lowercase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        _lowercase = 'I was born in 92000, and this is falsé.'
        _lowercase = tokenizer.encode(__A )
        _lowercase = rust_tokenizer.encode(__A )
        self.assertListEqual(__A ,__A )
        _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
        _lowercase = rust_tokenizer.encode(__A ,add_special_tokens=__A )
        self.assertListEqual(__A ,__A )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        _lowercase = tokenizer.convert_ids_to_tokens(__A )
        _lowercase = rust_tokenizer.tokenize(__A )
        self.assertListEqual(__A ,__A )

    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Slow and fast tokenizers agree on token strings and ids."""
        if not self.test_rust_tokenizer:
            return
        _lowercase = self.get_tokenizer()
        _lowercase = self.get_rust_tokenizer()
        _lowercase = 'I was born in 92000, and this is falsé.'
        _lowercase = tokenizer.tokenize(__A )
        _lowercase = rust_tokenizer.tokenize(__A )
        self.assertListEqual(__A ,__A )
        _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
        _lowercase = rust_tokenizer.encode(__A ,add_special_tokens=__A )
        self.assertListEqual(__A ,__A )
        _lowercase = self.get_rust_tokenizer()
        _lowercase = tokenizer.encode(__A )
        _lowercase = rust_tokenizer.encode(__A )
        self.assertListEqual(__A ,__A )

    @slow
    def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
        """Pinned-encoding integration test against the released checkpoint."""
        # fmt: off
        _lowercase = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        _lowercase = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=__A ,model_name='camembert-base' ,revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' ,sequences=__A ,)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 1 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> int:
    """Project Euler 18/67: read `triangle.txt` (whitespace-separated rows next
    to this script) and return the maximum top-to-bottom path sum.

    Fixes: the body called ``os.path.realpath`` on an undefined name (it should
    be this module's ``__file__``), discarded every intermediate result into a
    throwaway local while reading never-assigned names (``triangle``, ``a``,
    ...), and annotated the return with the unimported ``Optional``.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , 'triangle.txt' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    # Top-down DP: each cell accumulates the best path sum reaching it from
    # its (up to two) parents; edge cells have a single parent.
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
    # NOTE(review): `solution` is never defined in this module (the function
    # above is named SCREAMING_SNAKE_CASE__) — confirm the intended name.
    print(solution())
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
    """Configuration class for Transformer-XL (`transfo-xl`) checkpoints.

    Fixes relative to the reviewed version:
    * the three class attributes all shared one name, so only the last
      survived; restored to the ``PretrainedConfig`` protocol names
      (``model_type`` / ``keys_to_ignore_at_inference`` / ``attribute_map``);
    * ``__init__`` declared every parameter under the same name (a
      SyntaxError) and bound all values to a throwaway local instead of
      instance attributes; parameter names follow the upstream Transformer-XL
      configuration, whose defaults match this file's defaults exactly;
    * the ``max_position_embeddings`` property/getter was registered under a
      mangled name, so the ``@max_position_embeddings.setter`` decorator could
      not resolve — both are now defined under the public name.
    """

    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],  # upstream default; copied into a fresh list below, never mutated
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        # Tie all adaptive-softmax projections except the head cluster when
        # projection sharing is enabled.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id ,**kwargs )

    @property
    def max_position_embeddings( self ):
        """Transformer-XL has no fixed sequence-length limit; -1 signals that."""
        # Message copied from Transformer-XL documentation
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self ,value ):
        """Setting a sequence-length limit is meaningless for this model."""
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Map of canonical ALBERT checkpoint names to their hosted config files
# (originally named ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP before the rename).
snake_case = {
    """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
    """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
    """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
    """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
    """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
    """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
    """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
    """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class A_(UpperCAmelCase):
    """Configuration for ALBERT models.

    The mangled original renamed every ``__init__`` argument to ``__A``
    (a SyntaxError — duplicate argument) and lost the ``model_type`` class
    attribute; both are restored here with the stock ALBERT defaults.
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=3_0000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=1_6384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        # Special-token ids are consumed by the PretrainedConfig base first.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class A_(UpperCAmelCase):
    """ONNX export configuration for ALBERT.

    Fixes two mangling defects: the local axis map was assigned to
    ``_lowercase`` but read back as ``dynamic_axis`` (a NameError), and the
    property had been renamed away from ``inputs``, breaking the OnnxConfig
    contract the exporter relies on.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis between the
        # batch and sequence dimensions.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
# Map of canonical DPR checkpoint names to their hosted config files
# (originally named DPR_PRETRAINED_CONFIG_ARCHIVE_MAP before the rename).
snake_case = {
    """facebook/dpr-ctx_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-ctx_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-multiset-base""": (
        """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
    ),
}
class A_(UpperCAmelCase):
    """Configuration for DPR (Dense Passage Retrieval) encoders and readers.

    The mangled original renamed every ``__init__`` argument to ``__A``
    (a SyntaxError — duplicate argument); the stock DPR parameter names and
    defaults are restored.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection on top of the encoder.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ ( unittest.TestCase ):
    """Fast Flax pipeline download test.

    NOTE(review): an automated rename flattened the locals to `_lowercase`
    and call arguments to `__A`, so `all_root_files`/`files`/`tmpdirname`
    are read but never bound — the test cannot run as written; the original
    local names must be restored.
    """

    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        """Download the tiny Flax checkpoint and assert no PyTorch .bin file came with it."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _lowercase = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe' ,safety_checker=__A ,cache_dir=__A )
            _lowercase = [t[-1] for t in os.walk(os.path.join(__A ,os.listdir(__A )[0] ,'snapshots' ) )]
            _lowercase = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class A_ ( unittest.TestCase ):
    """Slow end-to-end tests for FlaxStableDiffusionPipeline (pmap across devices).

    NOTE(review): the automated rename flattened every local assignment to
    `_lowercase` and most call arguments to `__A`, so later reads of
    `pipeline`, `params`, `prompt`, `prng_seed`, `num_samples`, `prompt_ids`,
    `images`, `images_eff`, `scheduler`, `slice` and `slice_eff` reference
    names that are never bound — none of these tests can run as written.
    The original local names must be restored from the upstream test file.
    The numeric expectations below are only checked on 8-device hosts.
    """

    def __UpperCAmelCase ( self : str ) -> str:
        """Tiny checkpoint smoke test: 4 steps, 64x64 output, PIL conversion."""
        _lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe' ,safety_checker=__A )
        _lowercase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _lowercase = jax.random.PRNGKey(0 )
        _lowercase = 4
        _lowercase = jax.device_count()
        _lowercase = num_samples * [prompt]
        _lowercase = pipeline.prepare_inputs(__A )
        # shard inputs and rng
        _lowercase = replicate(__A )
        _lowercase = jax.random.split(__A ,__A )
        _lowercase = shard(__A )
        _lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
            assert np.abs(np.abs(__A ,dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
        _lowercase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(__A ) == num_samples

    def __UpperCAmelCase ( self : str ) -> List[Any]:
        """v1-4 checkpoint, `flax` (fp32) revision, 50 steps at 512x512."""
        _lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,revision='flax' ,safety_checker=__A )
        _lowercase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _lowercase = jax.random.PRNGKey(0 )
        _lowercase = 50
        _lowercase = jax.device_count()
        _lowercase = num_samples * [prompt]
        _lowercase = pipeline.prepare_inputs(__A )
        # shard inputs and rng
        _lowercase = replicate(__A )
        _lowercase = jax.random.split(__A ,__A )
        _lowercase = shard(__A )
        _lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
            assert np.abs((np.abs(__A ,dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1

    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """v1-4 checkpoint in bfloat16 (`bf16` revision), safety checker disabled."""
        _lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__A )
        _lowercase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _lowercase = jax.random.PRNGKey(0 )
        _lowercase = 50
        _lowercase = jax.device_count()
        _lowercase = num_samples * [prompt]
        _lowercase = pipeline.prepare_inputs(__A )
        # shard inputs and rng
        _lowercase = replicate(__A )
        _lowercase = jax.random.split(__A ,__A )
        _lowercase = shard(__A )
        _lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(__A ,dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1

    def __UpperCAmelCase ( self : List[Any] ) -> str:
        """Same bfloat16 run with the default safety checker left in place."""
        _lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa )
        _lowercase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _lowercase = jax.random.PRNGKey(0 )
        _lowercase = 50
        _lowercase = jax.device_count()
        _lowercase = num_samples * [prompt]
        _lowercase = pipeline.prepare_inputs(__A )
        # shard inputs and rng
        _lowercase = replicate(__A )
        _lowercase = jax.random.split(__A ,__A )
        _lowercase = shard(__A )
        _lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(__A ,dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1

    def __UpperCAmelCase ( self : Any ) -> Optional[int]:
        """bfloat16 run with an explicit FlaxDDIMScheduler swapped in."""
        _lowercase = FlaxDDIMScheduler(
            beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,set_alpha_to_one=__A ,steps_offset=1 ,)
        _lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,scheduler=__A ,safety_checker=__A ,)
        _lowercase = scheduler.create_state()
        _lowercase = scheduler_state
        _lowercase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _lowercase = jax.random.PRNGKey(0 )
        _lowercase = 50
        _lowercase = jax.device_count()
        _lowercase = num_samples * [prompt]
        _lowercase = pipeline.prepare_inputs(__A )
        # shard inputs and rng
        _lowercase = replicate(__A )
        _lowercase = jax.random.split(__A ,__A )
        _lowercase = shard(__A )
        _lowercase = pipeline(__A ,__A ,__A ,__A ,jit=__A ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
            assert np.abs((np.abs(__A ,dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1

    def __UpperCAmelCase ( self : List[str] ) -> str:
        """Compare a default run against one with memory-efficient attention enabled."""
        _lowercase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _lowercase = jax.device_count()
        _lowercase = num_samples * [prompt]
        _lowercase = jax.random.split(jax.random.PRNGKey(0 ) ,__A )
        _lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__A ,)
        _lowercase = replicate(__A )
        _lowercase = pipeline.prepare_inputs(__A )
        _lowercase = shard(__A )
        _lowercase = pipeline(__A ,__A ,__A ,jit=__A ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        _lowercase = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        _lowercase , _lowercase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' ,revision='bf16' ,dtype=jnp.bfloataa ,safety_checker=__A ,use_memory_efficient_attention=__A ,)
        _lowercase = replicate(__A )
        _lowercase = pipeline.prepare_inputs(__A )
        _lowercase = shard(__A )
        _lowercase = pipeline(__A ,__A ,__A ,jit=__A ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        # NOTE(review): the upstream test slices `images_eff` here, not `images`
        # — as written both slices would come from the same array.
        _lowercase = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
from multiprocessing import Lock, Pipe, Process

# Lock used to ensure that two processes do not access a pipe at the same
# time. The worker below declares `global process_lock`, but the mangled
# original only bound the lock to `snake_case` — restore the real name.
process_lock = Lock()
# Backward-compatible alias for the mangled module-level name.
snake_case = process_lock
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one cell of the parallel odd-even transposition sort.

    The mangled original named all seven parameters ``snake_case__`` (a
    SyntaxError: duplicate argument) and compared/min-ed values against
    themselves; the intended logic is restored.

    position    -- index of this cell in the list being sorted
    value       -- the cell's current value
    l_send/r_send -- pipes used to send `value` to the left/right neighbor
    lr_cv/rr_cv   -- pipes used to receive the left/right neighbor's value
    result_pipe   -- pipe used to report the final value back to the parent
    """
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE(review): 10 matches the demo list length in main(); generalize if
    # this is reused for other sizes.
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` using one process per element (odd-even transposition).

    Reconstructed: the mangled original bound both working lists to
    `_lowercase` and then appended to the unbound names `result_pipe` and
    `process_array_`, and targeted the processes at its own argument.
    Returns the sorted list (mutates `arr` in place with received values).
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made
    # outside of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    # the pipe a process sends right on is the pipe its right neighbor
    # receives left on, and vice versa
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Demo driver: sort a descending list of 10 values and print before/after.

    Renamed to `main` to match the call in the `__main__` guard below; the
    mangled version called the undefined names `odd_even_transposition` and
    `main` from a function the renamer had anonymized.
    """
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
# Script entry point: demonstrates the parallel odd-even transposition sort.
if __name__ == "__main__":
    main()
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
# Checkpoint family to convert. The original script named this constant MODEL;
# the argparse default further down interpolates it as {MODEL}.
snake_case = """base_with_context"""
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] , snake_case__ :List[str] ) -> Tuple:
    """Copy T5X notes-encoder weights into the torch SpectrogramNotesEncoder.

    NOTE(review): broken as mangled — both parameters are named
    `snake_case__` (SyntaxError: duplicate argument), the body reads the
    unbound names `weights`/`model`/`ly_weight`/`attention_weights`, and every
    left-hand attribute path (e.g. `model.token_embedder.weight`) was
    collapsed to `_lowercase`, discarding the copies. Restore the names from
    the upstream conversion script; do not run as-is.
    """
    _lowercase = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
    _lowercase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case__ )
    for lyr_num, lyr in enumerate(model.encoders ):
        _lowercase = weights[F"""layers_{lyr_num}"""]
        _lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        _lowercase = ly_weight['attention']
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
    _lowercase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
    return model
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] , snake_case__ :Any ) -> str:
    """Copy T5X continuous-encoder weights into the torch SpectrogramContEncoder.

    NOTE(review): same mangling defects as the notes-encoder loader above —
    duplicate `snake_case__` parameters (SyntaxError), unbound reads of
    `weights`/`model`, and assignment targets collapsed to `_lowercase`.
    Restore from the upstream conversion script before use.
    """
    _lowercase = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
    _lowercase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case__ )
    for lyr_num, lyr in enumerate(model.encoders ):
        _lowercase = weights[F"""layers_{lyr_num}"""]
        _lowercase = ly_weight['attention']
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        _lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
    _lowercase = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
    return model
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] , snake_case__ :Tuple ) -> List[Any]:
    """Copy T5X decoder weights (self/cross attention, FiLM, MLP) into TaFilmDecoder.

    NOTE(review): same mangling defects as the encoder loaders — duplicate
    `snake_case__` parameters (SyntaxError), unbound reads of `weights`/
    `model`/`ly_weight`/`attention_weights`, and left-hand attribute paths
    collapsed to `_lowercase`. Restore from the upstream conversion script.
    """
    _lowercase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
    _lowercase = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
    _lowercase = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case__ )
    _lowercase = nn.Parameter(
        torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        _lowercase = weights[F"""layers_{lyr_num}"""]
        _lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
        _lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        _lowercase = ly_weight['self_attention']
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        _lowercase = ly_weight['MultiHeadDotProductAttention_0']
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        _lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
        _lowercase = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
        _lowercase = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
    _lowercase = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
    _lowercase = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
    return model
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> str:
    """Convert a T5X music-spectrogram-diffusion checkpoint into a diffusers pipeline.

    NOTE(review): mangled — locals are all bound to `_lowercase`, so later
    reads of `ta_checkpoint`, `gin_overrides`, `synth_model`, `scheduler`,
    the three encoders/decoder and `pipe` are unbound; `args` is read from
    module scope (set by the `__main__` guard) instead of the parameter.
    Restore local names from the upstream conversion script before running.
    """
    _lowercase = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    # NOTE(review): `jnp` is `jax` here (see `import jax as jnp` at the top),
    # so `jnp.tree_util.tree_map` resolves — confirm this aliasing is intended.
    _lowercase = jnp.tree_util.tree_map(onp.array , snake_case__ )
    _lowercase = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    _lowercase = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    _lowercase = inference.parse_training_gin_file(snake_case__ , snake_case__ )
    _lowercase = inference.InferenceModel(args.checkpoint_path , snake_case__ )
    _lowercase = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    _lowercase = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    _lowercase = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    _lowercase = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    _lowercase = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , snake_case__ )
    _lowercase = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , snake_case__ )
    _lowercase = load_decoder(ta_checkpoint['target']['decoder'] , snake_case__ )
    _lowercase = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    _lowercase = SpectrogramDiffusionPipeline(
        notes_encoder=snake_case__ , continuous_encoder=snake_case__ , decoder=snake_case__ , scheduler=snake_case__ , melgan=snake_case__ , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    # Restored names: the mangled original assigned the parser to
    # `snake_case` yet called `parser.add_argument`, interpolated the
    # undefined `MODEL`, and passed the undefined `args` to `main` — three
    # NameErrors. `snake_case` still holds the checkpoint-family constant
    # ("base_with_context") at this point in the module, and the most recent
    # `SCREAMING_SNAKE_CASE__` definition above is the conversion entry point.
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{snake_case}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    # Bound at module scope deliberately: the conversion function reads
    # `args.checkpoint_path` / `args.save` globally.
    args = parser.parse_args()
    SCREAMING_SNAKE_CASE__(args)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (unused below) immediately clobbered by the archive map —
# originally two distinct names: `logger` and
# BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP.
snake_case = logging.get_logger(__name__)
snake_case = {
    """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
    """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
    """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_(UpperCAmelCase):
    """Configuration for BigBird models.

    The mangled original renamed every ``__init__`` argument to ``__A``
    (a SyntaxError — duplicate argument) and lost the ``model_type`` class
    attribute; both are restored with the stock BigBird defaults.
    """

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=5_0358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        # "block_sparse" (linear-memory sparse attention) or "original_full".
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class A_(UpperCAmelCase):
    """ONNX export configuration for BigBird.

    Fixes the mangling defects: the axis map was bound to ``_lowercase`` but
    read as ``dynamic_axis`` (a NameError), and the property had been renamed
    away from ``inputs``, breaking the OnnxConfig contract.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis between the
        # batch and sequence dimensions.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap in place when they violate
    `direction` (1 = ascending, 0 = descending).

    Restored from the mangled version, whose two index parameters had both
    been renamed to the same name (a SyntaxError, and a compare-with-self
    no-op); the name matches the call in bitonic_merge below.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence array[low:low+length] into
    `direction` order (1 = ascending, 0 = descending), in place.

    Restored parameter names (the mangled version declared four identical
    parameter names — a SyntaxError); the function name matches the recursive
    calls and the callers below.
    """
    if length > 1:
        middle = int(length / 2)
        # Compare-and-swap each element with its partner half a span away,
        # then merge the two halves independently.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Bitonic-sort array[low:low+length] in place into `direction` order
    (1 = ascending, 0 = descending). `length` must be a power of two.

    Restored parameter names (the mangled version declared four identical
    parameter names — a SyntaxError); the name matches the recursive calls
    and the `__main__` demo below.
    """
    if length > 1:
        middle = int(length / 2)
        # Build a bitonic sequence: first half ascending, second descending.
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
# Demo: read a comma-separated list, show it bitonic-sorted ascending, then
# merge the (now sorted, hence bitonic) list into descending order.
# NOTE(review): bitonic sort requires the input length to be a power of two.
if __name__ == "__main__":
    snake_case = input("""Enter numbers separated by a comma:\n""").strip()
    snake_case = [int(item.strip()) for item in user_input.split(""",""")]
    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("""\nSorted array in ascending order is: """, end="""""")
    print(*unsorted, sep=""", """)
    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("""Sorted array in descending order is: """, end="""""")
    print(*unsorted, sep=""", """)
def prefix_function(input_string: str) -> list:
    """Compute the Knuth–Morris–Pratt prefix function of `input_string`.

    result[i] is the length of the longest proper prefix of
    input_string[:i+1] that is also a suffix of it. Renamed to match the
    caller below (`max(prefix_function(...))`), which referenced this name.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("")
    []
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str) -> int:
    """Return the length of the longest proper prefix of `input_str` that is
    also a suffix of one of its prefixes (the maximum of the prefix function).

    Fix: the original called bare `max(...)`, which raises ValueError for the
    empty string; `default=0` makes the empty input return 0 instead.
    """
    return max(prefix_function(input_str), default=0)
# Run the doctests embedded in this module's docstrings.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Restored constant names: the mangled original bound all four constants to
# the single name `snake_case`, while the functions below read VALID_CHARS,
# VALID_INTS and COMMON_WORDS — every read was a NameError.
# Characters that may legally appear in the decoded plain text.
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
# Candidate key bytes: the cipher key is three lowercase ASCII letters.
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
# Frequent English words used to narrow down candidate decryptions.
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
# Preserve the mangled module alias's final binding.
snake_case = COMMON_WORDS
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR-decode `ciphertext` with the repeating `key`.

    Returns the decoded string, or None as soon as any decoded byte falls
    outside VALID_INTS (i.e. the key cannot be correct). Restored from the
    mangled version, whose two parameters shared one name (a SyntaxError) and
    whose accumulator was clobbered before the loop.
    """
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Return every decryption of `ciphertext` (under all three-lowercase-letter
    keys) whose characters are all valid.

    Restored: the mangled version iterated `product` over its own argument
    instead of LOWERCASE_INTS and called an undefined decode helper; the name
    matches the caller in solution() below.
    """
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidate plaintexts that contain `common_word`
    (case-insensitive).

    Restored from the mangled version, whose two parameters shared a single
    name (a SyntaxError); the name matches the caller in solution() below.
    """
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: decrypt the XOR cipher in `filename` (comma-separated
    byte values, key = three lowercase letters) and return the sum of the
    ASCII values of the decoded text.

    Restored: the mangled version clobbered every local into `_lowercase` and
    then read the unbound names `data`, `possibles` and `decoded_text`.
    """
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    # Successively require each common English word until one candidate is left.
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
# Script entry point: print the Project Euler 59 answer computed by solution().
if __name__ == "__main__":
    print(F"""{solution() = }""")
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list ) -> int:
    """Partition the items of `snake_case__` into two subsets minimizing the
    absolute difference of their sums; return that minimum difference.

    Fixes two defects in the original:
    - `n` and `s` were never bound (both results were assigned to the same
      throwaway local), so the function raised NameError immediately;
    - the DP recurrence read `dp[i][j - 1]` (same row, previous sum), which
      marks every sum as reachable and makes the function return `s % 2`;
      subset-sum must carry the previous row, `dp[i - 1][j]`.
    """
    arr = snake_case__
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i items sums to exactly j.
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        # The empty subset always sums to 0 (also covers the empty-input case).
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # item i-1 excluded
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # item i-1 included
    # The best split assigns the largest achievable subset-sum j <= s/2 to one
    # side; the difference is then (s - j) - j.
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
    return s  # unreachable: dp[n][0] is always True
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def SCREAMING_SNAKE_CASE__ ( idx :int ) -> list:
    """Return (HF name, original checkpoint name) rename pairs for the
    patch-embedding of stage *idx*: the projection conv and its normalization.

    Fix: the original bound the list to ``_lowercase`` while appending to and
    returning the unbound name ``embed``.
    """
    embed = []
    for hf_suffix, orig_suffix in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_suffix}",
                f"stage{idx}.patch_embed.{orig_suffix}",
            ) )
    return embed
def SCREAMING_SNAKE_CASE__ ( idx :int , cnt :int ) -> list:
    """Return (HF name, original checkpoint name) rename pairs for transformer
    block *cnt* of stage *idx*: conv q/k/v projections (conv + batch-norm
    buffers), linear q/k/v projections, attention output, MLP and layer norms.

    Fix: the original bound the list to ``_lowercase`` while appending to and
    returning the unbound name ``attention_weights``.  The 34 pairs are emitted
    in exactly the original order, now built with loops instead of 34 literal
    ``append`` calls.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"
    # Convolutional q/k/v projections: one conv weight plus the five batch-norm
    # parameters/buffers each (18 pairs).
    for name, abbr in (("query", "q"), ("key", "k"), ("value", "v")):
        attention_weights.append(
            (
                f"{hf_prefix}.attention.attention.convolution_projection_{name}.convolution_projection.convolution.weight",
                f"{orig_prefix}.attn.conv_proj_{abbr}.conv.weight",
            ) )
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.convolution_projection_{name}.convolution_projection.normalization.{stat}",
                    f"{orig_prefix}.attn.conv_proj_{abbr}.bn.{stat}",
                ) )
    # Linear q/k/v projections (6 pairs).
    for name, abbr in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{name}.{param}",
                    f"{orig_prefix}.attn.proj_{abbr}.{param}",
                ) )
    # Attention output projection, MLP and layer norms (10 pairs).
    for hf_name, orig_name in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_prefix}.{hf_name}.{param}", f"{orig_prefix}.{orig_name}.{param}") )
    return attention_weights
def SCREAMING_SNAKE_CASE__ ( idx :int ) -> list:
    """Return the single rename pair for stage *idx*'s cls token.

    The original checkpoint stores the cls token only under ``stage2`` — that
    literal is intentional and kept as-is.

    Fix: the original bound the list to ``_lowercase`` while appending to and
    returning the unbound name ``token``.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
    return token
def SCREAMING_SNAKE_CASE__ ( ) -> list:
    """Return the rename pairs for the final layer norm and classifier head.

    Fix: the original bound the list to ``_lowercase`` while appending to and
    returning the unbound name ``head``.
    """
    head = []
    head.append(('layernorm.weight', 'norm.weight') )
    head.append(('layernorm.bias', 'norm.bias') )
    head.append(('classifier.weight', 'head.weight') )
    head.append(('classifier.bias', 'head.bias') )
    return head
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] , snake_case__ :int , snake_case__ :List[Any] , snake_case__ :Optional[Any] ) -> Any:
    """Convert an original CvT checkpoint into a transformers
    ``CvtForImageClassification`` and save model + image processor.

    NOTE(review): every local is bound to ``_lowercase`` while later lines read
    the original names (``num_labels``, ``idalabel``, ``cvt_model``, ``config``,
    ``model``, ``image_processor``, ``original_weights``, ``list_of_state_dict``)
    — the variable names look mangled and those reads resolve at module level,
    if at all.  Confirm against the upstream conversion script before running.
    """
    _lowercase = 'imagenet-1k-id2label.json'
    _lowercase = 1000
    _lowercase = 'huggingface/label-files'
    _lowercase = num_labels
    # Fetch the ImageNet-1k id->label mapping from the hub dataset repo.
    _lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
    _lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
    _lowercase = idalabel
    _lowercase = {v: k for k, v in idalabel.items()}
    _lowercase = _lowercase = CvtConfig(num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ )
    # Stage depths are inferred from the model-name suffix (cvt-13 / cvt-21 / w24).
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        _lowercase = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        _lowercase = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        _lowercase = [2, 2, 20]
        _lowercase = [3, 12, 16]
        _lowercase = [192, 768, 1024]
    _lowercase = CvtForImageClassification(snake_case__ )
    _lowercase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    _lowercase = image_size
    # Load the original torch checkpoint on CPU.
    _lowercase = torch.load(snake_case__ , map_location=torch.device('cpu' ) )
    _lowercase = OrderedDict()
    _lowercase = []
    # Accumulate all (new name, old name) rename pairs stage by stage.
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            _lowercase = list_of_state_dict + cls_token(snake_case__ )
        _lowercase = list_of_state_dict + embeddings(snake_case__ )
        for cnt in range(config.depth[idx] ):
            _lowercase = list_of_state_dict + attention(snake_case__ , snake_case__ )
    _lowercase = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(snake_case__ )
    # Copy each original tensor under its new transformers name.
    for i in range(len(snake_case__ ) ):
        _lowercase = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(snake_case__ )
    model.save_pretrained(snake_case__ )
    image_processor.save_pretrained(snake_case__ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Fix: the parser and the parsed namespace were bound to `snake_case` while
    # the following lines read `parser` and `args`, raising NameError at runtime.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--cvt_model""",
        default="""cvt-w24""",
        type=str,
        help="""Name of the cvt model you'd like to convert.""",
    )
    parser.add_argument(
        """--image_size""",
        default=3_8_4,
        type=int,
        help="""Input Image Size""",
    )
    parser.add_argument(
        """--cvt_file_name""",
        default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
        type=str,
        help="""Input Image Size""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    args = parser.parse_args()
    # NOTE(review): `convert_cvt_checkpoint` must be defined above — the
    # converter in this file carries a different (mangled) name; confirm.
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
from manim import *
class A_ ( UpperCAmelCase ):
    """Manim scene animating disk-offloaded big-model inference: an input square
    travels through the model while each layer's weights are shuttled between
    CPU and GPU.

    NOTE(review): every local is rebound to ``_lowercase`` while later lines
    read the original names (``mem``, ``cpu``, ``gpu``, ``model``, ``disk``,
    ``input``, ``model_arr`` and friends) — the variable names look mangled and
    those reads resolve at module level, if at all.
    """
    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Basic building blocks: a memory cell, its smaller stroke-less variant,
        # and a quarter-size fill square.
        _lowercase = Rectangle(height=0.5 ,width=0.5 )
        _lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
        _lowercase = Rectangle(height=0.25 ,width=0.25 )
        # CPU: two columns of six cells plus a label, placed on the left.
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
        _lowercase = Text('CPU' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__A )
        # GPU: a single row of four cells.
        _lowercase = [mem.copy() for i in range(4 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = Text('GPU' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        gpu.move_to([-1, -1, 0] )
        self.add(__A )
        # Model: a row of six cells on the right.
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = Text('Model' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        model.move_to([3, -1.0, 0] )
        self.add(__A )
        # Filled overlays: one per model cell, mirrored onto the CPU column.
        _lowercase = []
        _lowercase = []
        for i, rect in enumerate(__A ):
            _lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
            target.move_to(__A )
            model_arr.append(__A )
            _lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__A )
        self.add(*__A ,*__A )
        # Disk: two columns of six meta cells on the far left.
        _lowercase = [meta_mem.copy() for i in range(6 )]
        _lowercase = [meta_mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
        _lowercase = Text('Disk' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        disk.move_to([-4, -1.25, 0] )
        self.add(__A ,__A )
        # Legend in the top-left corner.
        _lowercase = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _lowercase = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
        key_text.move_to([-5, 2.4, 0] )
        self.add(__A ,__A )
        _lowercase = MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
        blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
        self.add(__A )
        # Narration, then the input square entering the model.
        _lowercase = MarkupText(
            F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ) )
        _lowercase = Square(0.3 )
        input.set_fill(__A ,opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] ,__A ,buff=0.5 )
        self.play(Write(__A ) )
        input.generate_target()
        input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
        self.play(MoveToTarget(__A ) )
        self.play(FadeOut(__A ) )
        _lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
        a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
        # First layer: its weights hop from CPU to GPU.
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        _lowercase = MarkupText(
            F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ,run_time=3 ) )
        _lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        _lowercase = a.copy()
        # Walk the input across all six layers, swapping weights in and out.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            _lowercase = AnimationGroup(
                FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
            self.play(__A )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    # Speed up subsequent highlight animations.
                    _lowercase = 0.7
                self.play(
                    Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
            else:
                # Last layer: weights return to CPU and the input exits the model.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        # Wrap up: fade helpers and show the closing message.
        _lowercase = a_c
        _lowercase = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
        self.play(
            FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
        _lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
        self.wait()
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def SCREAMING_SNAKE_CASE__ ( class_info_file :str , repo_path :str = "shi-labs/oneformer_demo" ) -> dict:
    """Download *class_info_file* from the *repo_path* hub dataset and build the
    OneFormer metadata dict: one ``id -> name`` entry per class plus the
    ``thing_ids`` and ``class_names`` lists.

    Fix: both parameters were named ``snake_case__`` (a SyntaxError: duplicate
    argument) and every local was bound to ``_lowercase`` while the body read
    the unbound names ``class_info``, ``metadata``, ``class_names`` and
    ``thing_ids``.
    """
    with open(hf_hub_download(repo_path , class_info_file , repo_type='dataset' ) , 'r' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'] )
        # "thing" classes (countable objects) are tracked separately by id.
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class A_ ( unittest.TestCase ):
    """Configuration holder for the OneFormer image-processor tests: stores the
    processor kwargs, computes expected resize shapes, and fabricates model
    outputs for the post-processing tests.

    NOTE(review): ``__init__``'s parameters all share the name ``__A`` (a
    duplicate-argument SyntaxError) and every attribute is bound to
    ``_lowercase`` while the right-hand sides read the original parameter names
    — the identifiers look mangled; the intended attributes are the
    right-hand-side names (``batch_size``, ``num_channels``, ...).
    """
    def __init__( self : Optional[Any] ,__A : List[Any] ,__A : Optional[int]=7 ,__A : List[str]=3 ,__A : Union[str, Any]=30 ,__A : int=400 ,__A : Dict=None ,__A : List[str]=True ,__A : List[str]=True ,__A : List[str]=[0.5, 0.5, 0.5] ,__A : Tuple=[0.5, 0.5, 0.5] ,__A : str=10 ,__A : Any=False ,__A : List[Any]=255 ,__A : str="shi-labs/oneformer_demo" ,__A : Any="ade20k_panoptic.json" ,__A : List[Any]=10 ,) -> Optional[Any]:
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = num_channels
        _lowercase = min_resolution
        _lowercase = max_resolution
        _lowercase = do_resize
        # Default resize policy when the caller gives no explicit size.
        _lowercase = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size
        _lowercase = do_normalize
        _lowercase = image_mean
        _lowercase = image_std
        _lowercase = class_info_file
        _lowercase = prepare_metadata(__A ,__A )
        _lowercase = num_text
        _lowercase = repo_path
        # for the post_process_functions
        _lowercase = 2
        _lowercase = 10
        _lowercase = 10
        _lowercase = 3
        _lowercase = 4
        _lowercase = num_labels
        _lowercase = do_reduce_labels
        _lowercase = ignore_index
    def __UpperCAmelCase ( self : Optional[Any] ) -> int:
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }
    def __UpperCAmelCase ( self : str ,__A : Any ,__A : int=False ) -> Any:
        """Compute the (height, width) the processor is expected to resize to:
        shortest edge pinned to ``size['shortest_edge']``, aspect ratio kept;
        for a batch, the per-image maxima are returned."""
        if not batched:
            _lowercase = image_inputs[0]
            if isinstance(__A ,Image.Image ):
                _lowercase , _lowercase = image.size
            else:
                _lowercase , _lowercase = image.shape[1], image.shape[2]
            if w < h:
                _lowercase = int(self.size['shortest_edge'] * h / w )
                _lowercase = self.size['shortest_edge']
            elif w > h:
                _lowercase = self.size['shortest_edge']
                _lowercase = int(self.size['shortest_edge'] * w / h )
            else:
                _lowercase = self.size['shortest_edge']
                _lowercase = self.size['shortest_edge']
        else:
            # Batched: recurse per image, then pad to the batch-wide maxima.
            _lowercase = []
            for image in image_inputs:
                _lowercase , _lowercase = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            _lowercase = max(__A ,key=lambda __A : item[0] )[0]
            _lowercase = max(__A ,key=lambda __A : item[1] )[1]
        return expected_height, expected_width
    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        """Fabricate random model outputs with the tester's dimensions for the
        post-processing tests."""
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) ,masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) ,)
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
    """Tests for ``OneFormerImageProcessor``: attribute surface, PIL/numpy/torch
    batching behaviour, segmentation-map encoding, binary-mask RLE, and the
    semantic/instance/panoptic post-processing helpers.

    NOTE(review): as elsewhere in this file, locals are bound to ``_lowercase``
    while later lines read the original names — mangled identifiers.
    """
    SCREAMING_SNAKE_CASE_ : Any = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    SCREAMING_SNAKE_CASE_ : List[str] = image_processing_class
    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        # Shared fixture providing sizes, labels and metadata for every test.
        _lowercase = OneFormerImageProcessorTester(self )
    @property
    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        return self.image_processing_tester.prepare_image_processor_dict()
    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        # The processor must expose every documented configuration attribute.
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__A ,'image_mean' ) )
        self.assertTrue(hasattr(__A ,'image_std' ) )
        self.assertTrue(hasattr(__A ,'do_normalize' ) )
        self.assertTrue(hasattr(__A ,'do_resize' ) )
        self.assertTrue(hasattr(__A ,'size' ) )
        self.assertTrue(hasattr(__A ,'ignore_index' ) )
        self.assertTrue(hasattr(__A ,'class_info_file' ) )
        self.assertTrue(hasattr(__A ,'num_text' ) )
        self.assertTrue(hasattr(__A ,'repo_path' ) )
        self.assertTrue(hasattr(__A ,'metadata' ) )
        self.assertTrue(hasattr(__A ,'do_reduce_labels' ) )
    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        # Intentionally skipped for this processor.
        pass
    def __UpperCAmelCase ( self : int ) -> List[str]:
        # Initialize image_processor
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _lowercase = prepare_image_inputs(self.image_processing_tester ,equal_resolution=__A )
        for image in image_inputs:
            self.assertIsInstance(__A ,Image.Image )
        # Test not batched input
        _lowercase = image_processor(image_inputs[0] ,['semantic'] ,return_tensors='pt' ).pixel_values
        _lowercase , _lowercase = self.image_processing_tester.get_expected_values(__A )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        _lowercase , _lowercase = self.image_processing_tester.get_expected_values(__A ,batched=__A )
        _lowercase = image_processor(
            __A ,['semantic'] * len(__A ) ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def __UpperCAmelCase ( self : int ) -> Dict:
        # Initialize image_processor
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowercase = prepare_image_inputs(self.image_processing_tester ,equal_resolution=__A ,numpify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A ,np.ndarray )
        # Test not batched input
        _lowercase = image_processor(image_inputs[0] ,['semantic'] ,return_tensors='pt' ).pixel_values
        _lowercase , _lowercase = self.image_processing_tester.get_expected_values(__A )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        _lowercase , _lowercase = self.image_processing_tester.get_expected_values(__A ,batched=__A )
        _lowercase = image_processor(
            __A ,['semantic'] * len(__A ) ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        # Initialize image_processor
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowercase = prepare_image_inputs(self.image_processing_tester ,equal_resolution=__A ,torchify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A ,torch.Tensor )
        # Test not batched input
        _lowercase = image_processor(image_inputs[0] ,['semantic'] ,return_tensors='pt' ).pixel_values
        _lowercase , _lowercase = self.image_processing_tester.get_expected_values(__A )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processing_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        _lowercase , _lowercase = self.image_processing_tester.get_expected_values(__A ,batched=__A )
        _lowercase = image_processor(
            __A ,['semantic'] * len(__A ) ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def __UpperCAmelCase ( self : List[Any] ,__A : Optional[Any]=False ,__A : int=False ,__A : Tuple="np" ) -> int:
        """Build processor inputs, optionally with random segmentation maps
        (numpy or PIL) and an instance-id-to-semantic-id mapping."""
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        _lowercase = self.image_processing_tester.num_labels
        _lowercase = None
        _lowercase = None
        _lowercase = prepare_image_inputs(self.image_processing_tester ,equal_resolution=__A )
        if with_segmentation_maps:
            _lowercase = num_labels
            if is_instance_map:
                # Two instances per semantic class.
                _lowercase = list(range(__A ) ) * 2
                _lowercase = dict(enumerate(__A ) )
            # NOTE(review): ``np.uinta`` reads like a mangled ``np.uint8`` — confirm.
            _lowercase = [
                np.random.randint(0 ,high * 2 ,(img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                _lowercase = [Image.fromarray(__A ) for annotation in annotations]
        _lowercase = image_processor(
            __A ,['semantic'] * len(__A ) ,__A ,return_tensors='pt' ,instance_id_to_semantic_id=__A ,pad_and_return_pixel_mask=__A ,)
        return inputs
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        # Intentionally skipped for this processor.
        pass
    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        # Exercise segmentation-map handling across instance/semantic maps and
        # numpy/PIL inputs, checking batch alignment and padding.
        def common(__A : Optional[Any]=False ,__A : Optional[int]=None ):
            _lowercase = self.comm_get_image_processor_inputs(
                with_segmentation_maps=__A ,is_instance_map=__A ,segmentation_type=__A )
            _lowercase = inputs['mask_labels']
            _lowercase = inputs['class_labels']
            _lowercase = inputs['pixel_values']
            _lowercase = inputs['text_inputs']
            # check the batch_size
            for mask_label, class_label, text_input in zip(__A ,__A ,__A ):
                self.assertEqual(mask_label.shape[0] ,class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] ,pixel_values.shape[2:] )
                self.assertEqual(len(__A ) ,self.image_processing_tester.num_text )
        common()
        common(is_instance_map=__A )
        common(is_instance_map=__A ,segmentation_type='pil' )
        common(is_instance_map=__A ,segmentation_type='pil' )
    def __UpperCAmelCase ( self : Tuple ) -> Dict:
        # RLE of a 20x50 mask with three set pixels: first run length 21,
        # second run 45 (column-major flattening).
        _lowercase = np.zeros((20, 50) )
        _lowercase = 1
        _lowercase = 1
        _lowercase = 1
        _lowercase = binary_mask_to_rle(__A )
        self.assertEqual(len(__A ) ,4 )
        self.assertEqual(rle[0] ,21 )
        self.assertEqual(rle[1] ,45 )
    def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # Semantic post-processing: per-image maps at model size, then resized
        # when target_sizes is given.
        _lowercase = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file='ade20k_panoptic.json' ,num_text=self.image_processing_tester.num_text ,repo_path='shi-labs/oneformer_demo' ,)
        _lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
        _lowercase = fature_extractor.post_process_semantic_segmentation(__A )
        self.assertEqual(len(__A ) ,self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape ,(
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) ,)
        _lowercase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _lowercase = fature_extractor.post_process_semantic_segmentation(__A ,target_sizes=__A )
        self.assertEqual(segmentation[0].shape ,target_sizes[0] )
    def __UpperCAmelCase ( self : Dict ) -> List[Any]:
        # Instance post-processing: each result carries a segmentation map plus
        # a segments_info list.
        _lowercase = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file='ade20k_panoptic.json' ,num_text=self.image_processing_tester.num_text ,repo_path='shi-labs/oneformer_demo' ,)
        _lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
        _lowercase = image_processor.post_process_instance_segmentation(__A ,threshold=0 )
        self.assertTrue(len(__A ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('segmentation' in el )
            self.assertTrue('segments_info' in el )
            self.assertEqual(type(el['segments_info'] ) ,__A )
            self.assertEqual(
                el['segmentation'].shape ,(self.image_processing_tester.height, self.image_processing_tester.width) )
    def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        # Panoptic post-processing: same contract as the instance variant.
        _lowercase = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes ,max_seq_length=77 ,task_seq_length=77 ,class_info_file='ade20k_panoptic.json' ,num_text=self.image_processing_tester.num_text ,repo_path='shi-labs/oneformer_demo' ,)
        _lowercase = self.image_processing_tester.get_fake_oneformer_outputs()
        _lowercase = image_processor.post_process_panoptic_segmentation(__A ,threshold=0 )
        self.assertTrue(len(__A ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('segmentation' in el )
            self.assertTrue('segments_info' in el )
            self.assertEqual(type(el['segments_info'] ) ,__A )
            self.assertEqual(
                el['segmentation'].shape ,(self.image_processing_tester.height, self.image_processing_tester.width) )
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
    """Builds the config and dummy inputs used by the TimmBackbone tests.

    NOTE(review): ``__init__``'s parameters all share the name ``__A`` (a
    duplicate-argument SyntaxError) and every attribute is bound to
    ``_lowercase`` while the right-hand sides read the original parameter names
    — mangled identifiers; the intended attributes are the right-hand-side
    names.
    """
    def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
        _lowercase = parent
        # Default to the last stage when no explicit out_indices are given.
        _lowercase = out_indices if out_indices is not None else [4]
        _lowercase = stage_names
        _lowercase = out_features
        _lowercase = backbone
        _lowercase = batch_size
        _lowercase = image_size
        _lowercase = num_channels
        _lowercase = use_pretrained_backbone
        _lowercase = is_training
    def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
        """Return (config, pixel_values) for one forward pass."""
        _lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowercase = self.get_config()
        return config, pixel_values
    def __UpperCAmelCase ( self : Tuple ) -> Tuple:
        """Build a TimmBackboneConfig from the tester's settings."""
        return TimmBackboneConfig(
            image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
    def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
        """Run the backbone in eval mode and check the last feature map's shape."""
        _lowercase = TimmBackbone(config=__A )
        model.to(__A )
        model.eval()
        with torch.no_grad():
            _lowercase = model(__A )
        # NOTE(review): ``result`` is not bound locally (mangled name); the
        # expected 14x14 spatial size presumably matches the default backbone —
        # confirm.
        self.parent.assertEqual(
            result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Return (config, inputs_dict) for the common test plumbing."""
        _lowercase = self.prepare_config_and_inputs()
        _lowercase , _lowercase = config_and_inputs
        _lowercase = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
    # The overrides below disable inherited common tests that do not apply to
    # TimmBackbone; each skip reason documents why.
    # NOTE(review): all of these methods share the name ``__UpperCAmelCase``, so
    # within the class body each definition clobbers the previous one and they
    # no longer override the distinct inherited ``test_*`` methods they were
    # meant to silence. The names look mechanically mangled; restore the
    # original ``test_*`` names to make the skips effective.
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        pass
    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        pass
    @unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        pass
    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        pass
    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        pass
    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        pass
    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : int ) -> Any:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
        pass
    @unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def __UpperCAmelCase ( self : Any ) -> Any:
        pass
    @unittest.skip('Safetensors is not supported by timm.' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def __UpperCAmelCase ( self : int ) -> Optional[Any]:
        pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """Build Bit configs/inputs and run the per-model checks shared by the Bit
    test suite.

    Fixes applied to the mangled original: the class was named ``A_`` while the
    test classes below instantiate ``BitModelTester``; ``__init__`` declared
    every parameter as ``__A`` (duplicate argument names are a SyntaxError) and
    bound every value to a local ``_lowercase`` instead of ``self.<attr>``; the
    methods were all named ``__UpperCAmelCase`` although callers use
    ``prepare_config_and_inputs`` / ``get_config`` / ``create_and_check_*`` /
    ``prepare_config_and_inputs_for_common``. Parameter names and defaults are
    recovered from the values visible in the original signature and body.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        # NOTE: mutable defaults kept to preserve the original call signature;
        # the tester never mutates them.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)``; labels is None unless
        ``use_labels`` is set."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a BitConfig from the tester's settings."""
        return BitConfig(
            num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,)

    def create_and_check_model(self ,config ,pixel_values ,labels):
        """Base model: last hidden state must be (B, C_last, H/32, W/32)."""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)

    def create_and_check_for_image_classification(self ,config ,pixel_values ,labels):
        """Classification head: logits must be (B, num_labels)."""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def create_and_check_backbone(self ,config ,pixel_values ,labels):
        """Backbone: feature maps/channels must match out_features, including
        the out_features=None fallback (last stage only)."""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,1 )
        self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` for the shared model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Common-test suite for the Bit models (BitModel, BitForImageClassification,
    BitBackbone).

    NOTE(review): this class looks mechanically mangled — every class attribute
    is named ``SCREAMING_SNAKE_CASE_`` (each assignment clobbers the previous
    one, so only the last survives even though the values were clearly distinct
    flags/mappings), every test method is named ``__UpperCAmelCase`` (so
    unittest collects none of them and each def shadows the previous), and many
    bodies bind results to ``_lowercase`` or reference the undefined ``__A``.
    Code left byte-identical; restore names from the upstream test file.
    """
    SCREAMING_SNAKE_CASE_ : int = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE_ : Dict = (
        {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : List[Any] = False
    SCREAMING_SNAKE_CASE_ : Dict = False
    SCREAMING_SNAKE_CASE_ : Optional[Any] = False
    SCREAMING_SNAKE_CASE_ : Any = False
    # setUp: NOTE(review) — both results are bound to ``_lowercase`` instead of
    # ``self.model_tester``/``self.config_tester``, and ``__A`` is undefined.
    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        _lowercase = BitModelTester(self )
        _lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
    # Runs the shared ConfigTester checks.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # Placeholder for create_and_test_config_common_properties.
    def __UpperCAmelCase ( self : Tuple ) -> Tuple:
        return
    @unittest.skip(reason='Bit does not output attentions' )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        pass
    @unittest.skip(reason='Bit does not use inputs_embeds' )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        pass
    @unittest.skip(reason='Bit does not support input and output embeddings' )
    def __UpperCAmelCase ( self : str ) -> Dict:
        pass
    # Checks the first forward argument is ``pixel_values``.
    def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(__A )
            _lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowercase = [*signature.parameters.keys()]
            _lowercase = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,__A )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A )
    def __UpperCAmelCase ( self : int ) -> List[str]:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__A )
    # Initialization check on the norm layers.
    def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(config=__A )
            for name, module in model.named_modules():
                # NOTE(review): ``torch.nn`` has no ``BatchNormad`` — this line
                # raises AttributeError at runtime; almost certainly meant
                # ``nn.BatchNorm2d``. Left unchanged here.
                if isinstance(__A ,(nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
                    self.assertTrue(
                        torch.all(module.bias == 0 ) ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
    # Hidden-states shape check for both Bit layer types.
    def __UpperCAmelCase ( self : Any ) -> Optional[int]:
        def check_hidden_states_output(__A : str ,__A : List[str] ,__A : int ):
            _lowercase = model_class(__A )
            model.to(__A )
            model.eval()
            with torch.no_grad():
                _lowercase = model(**self._prepare_for_class(__A ,__A ) )
            _lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _lowercase = self.model_tester.num_stages
            self.assertEqual(len(__A ) ,expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                _lowercase = layer_type
                _lowercase = True
                check_hidden_states_output(__A ,__A ,__A )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                _lowercase = True
                check_hidden_states_output(__A ,__A ,__A )
    @unittest.skip(reason='Bit does not use feedforward chunking' )
    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        pass
    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__A )
    # Slow: round-trips the first published checkpoint through from_pretrained.
    @slow
    def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase = BitModel.from_pretrained(__A )
            self.assertIsNotNone(__A )
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests.

    Fixes: the loaded image was bound to ``_lowercase`` while ``return`` read
    the undefined name ``image``; the function is named ``prepare_img`` because
    the integration test calls it under that name.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Slow integration test: run the pretrained Bit classifier on the COCO
    fixture image and compare logits against reference values.

    Fixes applied to the mangled original: results were bound to ``_lowercase``
    while later lines read ``model``/``image_processor``/``image``/``inputs``/
    ``outputs``/``expected_shape``/``expected_slice``; the cached property is
    named ``default_image_processor`` because the test body reads
    ``self.default_image_processor``; ``.to(__A)`` referenced an undefined name
    and now targets ``torch_device``; the test method gets a ``test_`` prefix so
    unittest actually collects it.
    """

    @cached_property
    def default_image_processor(self):
        # Processor only exists when the vision extras are installed.
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
@require_torch
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = (BitBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BitConfig
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = BitModelTester(self ) | 67 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for NLLB-MoE (standard transformers pattern).
# Fixes: the structure dict and the modeling symbol list were both bound to
# ``snake_case`` while the last line reads ``_import_structure`` (NameError),
# the modeling symbols were never attached to the structure, the TYPE_CHECKING
# branch imported ``NllbMoeTopaRouter`` instead of the declared
# ``NllbMoeTop2Router``, and the lazy module was never installed in
# ``sys.modules``.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Keras reference implementation for each EfficientNet variant.
# Fixes: every entry pointed at ``efficientnet.EfficientNetBa``, which does not
# exist in tf.keras.applications (AttributeError at import time) — the API
# exposes EfficientNetB0 ... EfficientNetB7. Also, both this dict and
# CONFIG_MAP were bound to the same name ``snake_case`` (the second assignment
# clobbered the first) while later code reads ``model_classes``/``CONFIG_MAP``.
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

# Per-variant hyper-parameters: classifier hidden dim, compound-scaling
# coefficients, input resolution, dropout, and the block indices that need
# asymmetric depthwise padding.
CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def get_efficientnet_config(model_name):
    """Build an ``EfficientNetConfig`` for the given variant name ("b0".."b7").

    Fixes applied to the mangled original: every assignment targeted
    ``_lowercase`` while ``return config`` and the comprehensions read
    ``config``/``idalabel`` — undefined as written; the function is named
    ``get_efficientnet_config`` because the conversion entry point calls it
    under that name.
    NOTE(review): the ``config.<field>`` attribute names follow
    EfficientNetConfig's documented fields — confirm against the library.
    """
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='dataset' ) ,'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Download the standard COCO test image used to sanity-check conversion.

    Fixes: the URL and the decoded image were bound to ``_lowercase`` while
    ``return`` read the undefined ``im``; ``requests.get(snake_case__,
    stream=snake_case__)`` referenced undefined names (the URL and ``True``);
    the function is named ``prepare_img`` because the conversion entry point
    calls it under that name.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
def convert_image_processor(model_name):
    """Build the ``EfficientNetImageProcessor`` matching a model variant.

    Fixes: ``size``/``preprocessor`` were bound to ``_lowercase`` (undefined
    when read), and ``do_center_crop=snake_case__`` referenced an undefined
    name — set to False here (NOTE(review): confirm against the upstream
    conversion script); the function is named ``convert_image_processor``
    because the conversion entry point calls it under that name.
    """
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=False ,)
    return preprocessor
def rename_keys(original_param_names):
    """Build the mapping from TF/Keras EfficientNet weight names to the
    HuggingFace ``EfficientNetForImageClassification`` state-dict keys.

    Fixes applied to the mangled original: intermediate results were bound to
    ``_lowercase`` while later lines read ``block_name_mapping``/``hf_b``/
    ``key_mapping`` (undefined as written); the two classifier entries were
    assigned to ``_lowercase`` instead of ``key_mapping[...]``; the function is
    named ``rename_keys`` because the conversion entry point calls it under
    that name.
    """
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    # Map the Keras block suffix ("1a", "2b", ...) onto consecutive HF indices.
    block_name_mapping = {b: str(i ) for b, i in zip(block_names ,range(num_blocks ) )}
    rename_keys = []
    rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
    rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
    rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
    rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
    rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
        rename_keys.append(
            (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
        rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
        rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
        rename_keys.append(
            (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
        rename_keys.append(
            (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
        rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
        rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
        rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
        rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
        rename_keys.append(
            (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
        rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
        rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
        rename_keys.append(
            (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
        rename_keys.append(
            (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
    rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
    rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
    rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
    rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
    rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]
    # The classification head sits outside the ``efficientnet.`` prefix.
    # NOTE(review): the TF-side keys were lost in the mangling; 'predictions/*'
    # matches the Keras top-layer naming — confirm against a real checkpoint.
    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """Copy every TF weight into the matching HF parameter in place, applying
    the layout permutations conv/depthwise/dense weights need.

    Fixes applied to the mangled original: the signature repeated
    ``snake_case__`` three times (duplicate argument names are a SyntaxError;
    the parameter order follows the conversion entry point's call), and
    ``hf_key``/``new_hf_value`` were bound to ``_lowercase`` — undefined when
    read.
    """
    for key, value in tf_params.items():
        # Normalization stats are handled via the explicit running_* keys.
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are HWIO; torch expects OIHW.
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            # Dense kernels just need a transpose.
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any , snake_case__ :str , snake_case__ :Optional[Any] , snake_case__ :Tuple ) -> List[Any]:
    """Convert one Keras EfficientNet checkpoint to the HuggingFace format,
    verify the logits match, and optionally save / push the result.

    NOTE(review): this body looks mechanically mangled — the signature repeats
    ``snake_case__`` four times (duplicate argument names are a SyntaxError;
    the __main__ block passes model_name, pytorch_dump_folder_path, save_model,
    push_to_hub), and every assignment targets ``_lowercase`` while later lines
    read ``original_model``, ``tf_params``, ``tf_non_train_params``,
    ``hf_model``, ``preprocessor``, ``outputs``, ``hf_logits``, ``image_size``,
    ``original_logits`` — all undefined as written. Code left byte-identical;
    restore names from the upstream conversion script.
    """
    # Instantiate the Keras reference model with its ImageNet weights.
    _lowercase = model_classes[model_name](
        include_top=snake_case__ , weights='imagenet' , input_tensor=snake_case__ , input_shape=snake_case__ , pooling=snake_case__ , classes=1000 , classifier_activation='softmax' , )
    _lowercase = original_model.trainable_variables
    _lowercase = original_model.non_trainable_variables
    _lowercase = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        _lowercase = param.numpy()
    _lowercase = list(tf_params.keys() )
    # Load HuggingFace model
    _lowercase = get_efficientnet_config(snake_case__ )
    _lowercase = EfficientNetForImageClassification(snake_case__ ).eval()
    _lowercase = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    _lowercase = rename_keys(snake_case__ )
    replace_params(snake_case__ , snake_case__ , snake_case__ )
    # Initialize preprocessor and preprocess input image
    _lowercase = convert_image_processor(snake_case__ )
    _lowercase = preprocessor(images=prepare_img() , return_tensors='pt' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        _lowercase = hf_model(**snake_case__ )
    _lowercase = outputs.logits.detach().numpy()
    # Original model inference
    _lowercase = False
    _lowercase = CONFIG_MAP[model_name]['image_size']
    _lowercase = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    _lowercase = image.img_to_array(snake_case__ )
    _lowercase = np.expand_dims(snake_case__ , axis=0 )
    _lowercase = original_model.predict(snake_case__ )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(snake_case__ , snake_case__ , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(snake_case__ ):
            os.mkdir(snake_case__ )
        # Save converted model and image processor
        hf_model.save_pretrained(snake_case__ )
        preprocessor.save_pretrained(snake_case__ )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        _lowercase = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(snake_case__ )
        hf_model.push_to_hub(snake_case__ )
if __name__ == "__main__":
    # Fixes: the parser was bound to ``snake_case`` while the following lines
    # call ``parser.add_argument`` and read ``args`` — both undefined names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
# Pre-computed fifth powers of the decimal digits, keyed by digit character.
# Fixes: the table was bound to ``snake_case`` while the function below reads
# ``DIGITS_FIFTH_POWER`` (NameError as written).
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``.

    The function is named ``digits_fifth_powers_sum`` because the solution
    below calls it under that name; the parameter is ``number`` to match the
    body (the mangled original declared ``snake_case__`` but read the real
    name).
    """
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    """Sum all numbers in [1000, 1000000) equal to the sum of the fifth powers
    of their digits (Project Euler 30 style).

    Fixes: the generator condition called ``digits_fifth_powers_sum`` on the
    undefined name ``snake_case__`` instead of the loop variable ``number``;
    the function is named ``solution`` because the ``__main__`` guard calls it
    under that name.
    """
    return sum(
        number
        for number in range(1000 , 100_0000 )
        if number == digits_fifth_powers_sum(number ) )


if __name__ == "__main__":
    print(solution())
import argparse
# Path of the docs version-switcher script this tool rewrites.
# NOTE(review): name reconstructed — the original constant was bound to the
# generic ``snake_case`` and never referenced.
JS_PATH = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Point ``stableVersion`` at ``version`` and append the version to the
    mapping table inside custom.js.

    Fixes applied to the mangled original: the file lines and updated strings
    were bound to ``_lowercase`` while the loops read ``lines``/``index``
    (undefined as written); ``open(snake_case__, ...)`` opened the version
    *argument* instead of the JS file path; the function is named
    ``update_custom_js`` because the ``__main__`` guard calls it under that
    name.
    """
    with open(JS_PATH , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = F"""const stableVersion = \"v{version}\"\n"""
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F"""    \"v{version}\": \"v{version}\",\n"""
    with open(JS_PATH , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    # Fixes: the parser was bound to ``snake_case`` while the next lines call
    # ``parser.add_argument`` and read ``args`` — both undefined names.
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Primality test by 6k±1 trial division.

    Fixes: the parameter was mangled to ``snake_case__`` while the body reads
    ``number`` (NameError as written); the function is named ``is_prime``
    because ``validate`` and ``compute_truncated_primes`` call it under that
    name.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return ``n`` together with every left- and right-truncation of it.

    Fixes: ``str_num``/``list_nums`` were bound to ``_lowercase`` (undefined
    when read); the function is named ``list_truncated_nums`` because
    ``compute_truncated_primes`` calls it under that name.
    """
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers longer than 3 digits, both the leading and
    trailing 3-digit chunks must themselves be prime.

    Fixes the mangled parameter name (body reads ``n``-style slices on the
    argument) and restores the name ``compute_truncated_primes`` calls.
    """
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first ``count`` truncatable primes, scanning odd numbers
    upward from 13.

    Fixes: the accumulator, the counter and the truncation list were bound to
    ``_lowercase`` while the loop reads ``list_truncated_primes``/``num``/
    ``list_nums`` — undefined as written; the function is named
    ``compute_truncated_primes`` because ``solution`` and the ``__main__``
    guard call it under that name.
    """
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Sum of the eleven truncatable primes (Project Euler 37).

    Fixes: all five functions in this module shared the def name
    ``SCREAMING_SNAKE_CASE__`` (each def clobbers the previous, leaving every
    internal call unresolved); this one is named ``solution`` per the module's
    Project Euler convention.
    """
    return sum(compute_truncated_primes(11 ) )


if __name__ == "__main__":
    print(F"""{sum(compute_truncated_primes(1_1)) = }""")
def solution(numerator: int = 3 , denominator: int = 7 , limit: int = 100_0000 ) -> int:
    """Return the numerator of the largest fraction n/d strictly below
    ``numerator/denominator`` with d <= ``limit`` (Project Euler 71 for the
    defaults 3/7).

    Fixes applied to the mangled original: the three parameters were all named
    ``snake_case__`` (duplicate argument names are a SyntaxError) while the
    ``__main__`` guard calls ``solution(numerator=..., denominator=...,
    limit=...)``; the running best values were bound to ``_lowercase`` while
    the comparison reads ``max_numerator``/``max_denominator``/
    ``current_numerator`` — undefined as written.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        # Largest numerator strictly below numerator/denominator for this d.
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Cross-multiplied comparison avoids floating-point error:
        # current_numerator/current_denominator > max_numerator/max_denominator
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
    """Builds a small UMT5-style ``TaConfig`` plus matching dummy encoder /
    decoder tensors for the model tests below; sizes are deliberately tiny so
    the tests run quickly on CPU."""
    def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = encoder_seq_length
        _lowercase = decoder_seq_length
        # For common tests
        _lowercase = self.decoder_seq_length
        _lowercase = is_training
        _lowercase = use_attention_mask
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = d_ff
        _lowercase = relative_attention_num_buckets
        _lowercase = dropout_rate
        _lowercase = initializer_factor
        _lowercase = eos_token_id
        _lowercase = pad_token_id
        _lowercase = decoder_start_token_id
        _lowercase = None
        _lowercase = decoder_layers
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        # Real checkpoint config; used only by the large-config path.
        return TaConfig.from_pretrained('google/umt5-base' )
    def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
        # Fill in default attention/head masks for any the caller did not supply.
        if attention_mask is None:
            _lowercase = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            _lowercase = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            _lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
        if decoder_head_mask is None:
            _lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        if cross_attn_head_mask is None:
            _lowercase = torch.ones(
                config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        # Random token ids for encoder and decoder, clamped above pad.
        _lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
        _lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        _lowercase = input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = self.get_config()
        _lowercase = config.num_attention_heads
        _lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
        return config, input_dict
    def __UpperCAmelCase ( self : Dict ) -> str:
        _lowercase , _lowercase = self.prepare_config_and_inputs()
        return config, inputs_dict
    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        # Pipeline-test config: fixed vocab of 166.
        return TaConfig(
            vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    def __UpperCAmelCase ( self : Dict ) -> Any:
        # Tiny config built from the tester's own dimensions.
        return TaConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
        # Forward pass sanity checks: output shapes and past-key-value layout.
        _lowercase = UMTaModel(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
        _lowercase = model(input_ids=__A ,decoder_input_ids=__A )
        _lowercase = result.last_hidden_state
        _lowercase = result.past_key_values
        _lowercase = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(__A ) ,config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) ,4 )
    def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
        # Verify that caching past key values reproduces the uncached outputs.
        _lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
        # first forward pass
        _lowercase = model(__A ,use_cache=__A )
        _lowercase = model(__A )
        _lowercase = model(__A ,use_cache=__A )
        self.parent.assertTrue(len(__A ) == len(__A ) )
        self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
        _lowercase , _lowercase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
        # append to next input_ids and
        _lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
        _lowercase = model(__A )['last_hidden_state']
        _lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
        # select random slice
        _lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        _lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
        _lowercase = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
    def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
        # Half-precision forward pass must not produce NaNs.
        _lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
        _lowercase = model(**__A )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Model / generation / pipeline tests for UMT5, driven by the tester class
    above."""
    SCREAMING_SNAKE_CASE_ : List[str] = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE_ : str = (
        {
            '''conversational''': UMTaForConditionalGeneration,
            '''feature-extraction''': UMTaModel,
            '''summarization''': UMTaForConditionalGeneration,
            '''text2text-generation''': UMTaForConditionalGeneration,
            '''translation''': UMTaForConditionalGeneration,
            '''question-answering''': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    SCREAMING_SNAKE_CASE_ : int = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : List[Any] = True
    SCREAMING_SNAKE_CASE_ : int = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
    def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
        # Shared tester instance used by the tests below.
        _lowercase = UMTaModelTester(self )
    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def __UpperCAmelCase ( self : int ) -> str:
        # ONNX export smoke test (currently skipped, see decorator).
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                __A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
    @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
    def __UpperCAmelCase ( self : List[Any] ) -> str:
        # fp16 forward check; only meaningful on accelerator devices.
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*__A )
    def __UpperCAmelCase ( self : List[str] ) -> int:
        # Zeroed head masks should zero out the corresponding attention weights.
        _lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = config_and_inputs[0]
        _lowercase = UMTaForConditionalGeneration(__A ).eval()
        model.to(__A )
        _lowercase = {
            'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
        }
        for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
            _lowercase = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                _lowercase = torch.ones(
                    config.num_decoder_layers ,config.num_heads ,device=__A )
            _lowercase = model.generate(
                config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            _lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def __UpperCAmelCase ( self : str ) -> List[Any]:
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
    """Slow integration test: tokenizes multilingual prompts with the real
    umt5-small checkpoint and compares token ids and generated text against
    hard-coded expectations."""
    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def __UpperCAmelCase ( self : int ) -> List[str]:
        _lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
        _lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
        _lowercase = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        _lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
        # fmt: off
        # Expected token ids for the prompts above (padded to a common length).
        _lowercase = torch.tensor(
            [
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(__A ,__A )
        _lowercase = model.generate(input_ids.to(__A ) )
        _lowercase = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        _lowercase = tokenizer.batch_decode(__A )
        self.assertEqual(__A ,__A )
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( UpperCAmelCase ):
    """Processor wrapping a CLAP feature extractor and a Roberta tokenizer into
    one object for preparing text and/or audio model inputs."""
    # Class names used by ProcessorMixin to instantiate the sub-components.
    SCREAMING_SNAKE_CASE_ : int = '''ClapFeatureExtractor'''
    SCREAMING_SNAKE_CASE_ : Optional[Any] = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self : int ,__A : Union[str, Any] ,__A : Union[str, Any] ) -> Dict:
        super().__init__(__A ,__A )
    def __call__( self : Dict ,__A : Dict=None ,__A : Tuple=None ,__A : Optional[Any]=None ,**__A : List[Any] ) -> List[str]:
        # Tokenize text and/or extract audio features; at least one of the two
        # inputs must be provided.
        _lowercase = kwargs.pop('sampling_rate' ,__A )
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )
        if text is not None:
            _lowercase = self.tokenizer(__A ,return_tensors=__A ,**__A )
        if audios is not None:
            _lowercase = self.feature_extractor(
                __A ,sampling_rate=__A ,return_tensors=__A ,**__A )
        if text is not None and audios is not None:
            # NOTE(review): the audio features are assigned to a throwaway local
            # and never attached to `encoding` — this looks like a dropped
            # `encoding['input_features'] = ...`; TODO confirm against upstream.
            _lowercase = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__A ) ,tensor_type=__A )
    def __UpperCAmelCase ( self : List[str] ,*__A : Optional[Any] ,**__A : Optional[Any] ) -> Union[str, Any]:
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*__A ,**__A )
    def __UpperCAmelCase ( self : Tuple ,*__A : int ,**__A : List[Any] ) -> Tuple:
        # Delegate single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*__A ,**__A )
    @property
    def __UpperCAmelCase ( self : str ) -> List[Any]:
        # Union of tokenizer and feature-extractor input names, deduplicated
        # while preserving order.
        _lowercase = self.tokenizer.model_input_names
        _lowercase = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue_model_parallelism.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
    ] )
class A_ ( unittest.TestCase ):
    """Release-time SageMaker test: launches a model-parallel GLUE fine-tuning
    job and asserts runtime / accuracy / loss against the parameterized
    thresholds above."""
    def __UpperCAmelCase ( self : Any ) -> Optional[int]:
        # Copy the training script into the SageMaker test path before running.
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
        assert hasattr(self ,'env' )
    def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
        # configuration for running training on smdistributed Model Parallel
        _lowercase = {
            'enabled': True,
            'processes_per_host': 8,
        }
        _lowercase = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        _lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        _lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            } ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
    def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
        # Export the job's CloudWatch metrics to CSV for later inspection.
        TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(1,)] )
    def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
        # create estimator
        _lowercase = self.create_estimator(__A )
        # run training
        estimator.fit()
        # result dataframe
        _lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _lowercase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A )
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases: a vector is any iterable of numbers (or a numpy array); the
# distance result is a numeric scalar.
# NOTE(review): `np.floataa` looks like a mangled `np.float64` — TODO confirm.
snake_case = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
snake_case = typing.Union[np.floataa, int, float]  # noqa: UP007
def SCREAMING_SNAKE_CASE__ ( vector_a :Vector , vector_b :Vector ) -> VectorOut:
    """Return the Euclidean distance between two equal-length vectors via NumPy.

    BUG FIX: both parameters were previously named ``snake_case__`` — a
    SyntaxError — which also degenerated the computation into ``v - v``.
    """
    # Element-wise difference, squared, summed, then square-rooted.
    return np.sqrt(np.sum((np.asarray(vector_a ) - np.asarray(vector_b )) ** 2 ) )
def SCREAMING_SNAKE_CASE__ ( vector_a :Vector , vector_b :Vector ) -> VectorOut:
    """Pure-Python Euclidean distance between two equal-length vectors.

    BUG FIX: the original declared two identically named parameters (a
    SyntaxError) and unpacked both zip elements into the same name, which made
    every squared difference zero.
    """
    # Sum of squared element-wise differences, then the square root.
    return sum((va - vb) ** 2 for va, vb in zip(vector_a , vector_b ) ) ** (1 / 2)
if __name__ == "__main__":
    def SCREAMING_SNAKE_CASE__ ( ) -> None:
        """Micro-benchmark comparing the pure-Python and NumPy distances.

        NOTE(review): the timeit statements reference `euclidean_distance_no_np`
        and `euclidean_distance`, and the call below uses `benchmark` — none of
        these names are bound in the visible (name-mangled) code; TODO confirm.
        """
        from timeit import timeit
        print('Without Numpy' )
        print(
            timeit(
                'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
        print('With Numpy' )
        print(
            timeit(
                'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
    benchmark()
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
# Checkpoint -> config-file URL map for the published BlenderbotSmall model.
snake_case = {
    """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
    """Configuration class for BlenderbotSmall encoder-decoder models; stores
    model dimensions, dropout rates and special-token ids."""
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
    # Keys excluded when comparing configs at inference time.
    SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
    # Canonical attribute names mapped onto this config's field names.
    SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
        _lowercase = vocab_size
        _lowercase = max_position_embeddings
        _lowercase = d_model
        _lowercase = encoder_ffn_dim
        _lowercase = encoder_layers
        _lowercase = encoder_attention_heads
        _lowercase = decoder_ffn_dim
        _lowercase = decoder_layers
        _lowercase = decoder_attention_heads
        _lowercase = dropout
        _lowercase = attention_dropout
        _lowercase = activation_dropout
        _lowercase = activation_function
        _lowercase = init_std
        _lowercase = encoder_layerdrop
        _lowercase = decoder_layerdrop
        _lowercase = use_cache
        _lowercase = encoder_layers
        _lowercase = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
    """ONNX export configuration for BlenderbotSmall: declares dynamic-axis
    input/output specs and builds dummy inputs for the default (seq2seq),
    causal-lm and classification/QA export tasks."""
    @property
    def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        # Input spec: names -> {axis index: dynamic-axis label}, varying by task
        # and by whether past key values are exported.
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                _lowercase = {0: 'batch'}
                _lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                _lowercase = {0: 'batch', 1: 'decoder_sequence'}
                _lowercase = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(__A ,direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                _lowercase , _lowercase = self.num_layers
                for i in range(__A ):
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            _lowercase = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
        # Output spec mirrors the base class, extended with past-key-value axes.
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase = super().outputs
        else:
            _lowercase = super(__A ,self ).outputs
            if self.use_past:
                _lowercase , _lowercase = self.num_layers
                for i in range(__A ):
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
                    _lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        # Dummy inputs for the seq2seq export: encoder + decoder ids, plus
        # zero-filled past key values when use_past is set.
        _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __A ,__A ,__A ,__A ,__A )
        # Generate decoder inputs
        _lowercase = seq_length if not self.use_past else 1
        _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __A ,__A ,__A ,__A ,__A )
        _lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        _lowercase = dict(**__A ,**__A )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            _lowercase , _lowercase = common_inputs['input_ids'].shape
            _lowercase = common_inputs['decoder_input_ids'].shape[1]
            _lowercase , _lowercase = self.num_attention_heads
            _lowercase = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _lowercase = decoder_seq_length + 3
            _lowercase = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            _lowercase = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
            _lowercase = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            _lowercase , _lowercase = self.num_layers
            _lowercase = min(__A ,__A )
            _lowercase = max(__A ,__A ) - min_num_layers
            _lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(__A ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(__A ),
                        torch.zeros(__A ),
                        torch.zeros(__A ),
                        torch.zeros(__A ),
                    ) )
            # TODO: test this.
            _lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(__A ,__A ):
                common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
        return common_inputs
    def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        # Dummy inputs for the causal-lm export (decoder-only past handling).
        _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            __A ,__A ,__A ,__A ,__A )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
            _lowercase , _lowercase = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            _lowercase = seqlen + 2
            _lowercase , _lowercase = self.num_layers
            _lowercase , _lowercase = self.num_attention_heads
            _lowercase = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            _lowercase = common_inputs['attention_mask'].dtype
            _lowercase = torch.cat(
                [common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
            _lowercase = [
                (torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
            ]
        return common_inputs
    def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        _lowercase = compute_effective_axis_dimension(
            __A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        _lowercase = tokenizer.num_special_tokens_to_add(__A )
        _lowercase = compute_effective_axis_dimension(
            __A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
        # Generate dummy inputs according to compute batch and sequence
        _lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        _lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
        return common_inputs
    def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        # Dispatch to the task-specific dummy-input builder above.
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
        elif self.task == "causal-lm":
            _lowercase = self._generate_dummy_inputs_for_causal_lm(
                __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
        else:
            _lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                __A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
        return common_inputs
    def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
        # Flatten past-key-value structures per task (seq2seq vs causal-lm).
        if self.task in ["default", "seq2seq-lm"]:
            _lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
        else:
            _lowercase = super(__A ,self )._flatten_past_key_values_(
                __A ,__A ,__A ,__A )
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
snake_case = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
    """Safety checker that runs NSFW and watermark detection heads on top of a
    CLIP vision backbone and blacks out flagged images."""
    SCREAMING_SNAKE_CASE_ : Optional[Any] = CLIPConfig
    SCREAMING_SNAKE_CASE_ : Dict = ['''CLIPEncoderLayer''']
    def __init__( self : Optional[int] ,__A : CLIPConfig ) -> Tuple:
        super().__init__(__A )
        _lowercase = CLIPVisionModelWithProjection(config.vision_config )
        # Two linear heads over the projected CLIP embedding: one score each
        # for NSFW and watermark detection.
        _lowercase = nn.Linear(config.vision_config.projection_dim ,1 )
        _lowercase = nn.Linear(config.vision_config.projection_dim ,1 )
    @torch.no_grad()
    def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Optional[int]=0.5 ,__A : Union[str, Any]=0.5 ) -> Any:
        # Score images, threshold the two heads, and replace any flagged image
        # with an all-black array of the same shape.
        _lowercase = self.vision_model(__A )[0]
        _lowercase = self.p_head(__A )
        _lowercase = nsfw_detected.flatten()
        _lowercase = nsfw_detected > p_threshold
        _lowercase = nsfw_detected.tolist()
        if any(__A ):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )
        for idx, nsfw_detected_ in enumerate(__A ):
            if nsfw_detected_:
                _lowercase = np.zeros(images[idx].shape )
        _lowercase = self.w_head(__A )
        _lowercase = watermark_detected.flatten()
        _lowercase = watermark_detected > w_threshold
        _lowercase = watermark_detected.tolist()
        if any(__A ):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )
        for idx, watermark_detected_ in enumerate(__A ):
            if watermark_detected_:
                _lowercase = np.zeros(images[idx].shape )
        return images, nsfw_detected, watermark_detected
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Force deterministic torch ops so image slices can be compared exactly below.
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast KarrasVe pipeline test on a tiny randomly initialised UNet."""
    @property
    def __UpperCAmelCase ( self : Any ) -> str:
        # Small seeded UNet so outputs are reproducible across runs.
        torch.manual_seed(0 )
        _lowercase = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
        return model
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        # Run the pipeline twice (dict and tuple return) and compare slices.
        _lowercase = self.dummy_uncond_unet
        _lowercase = KarrasVeScheduler()
        _lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
        pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        _lowercase = torch.manual_seed(0 )
        _lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
        _lowercase = torch.manual_seed(0 )
        _lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
        _lowercase = image[0, -3:, -3:, -1]
        _lowercase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        _lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow KarrasVe integration test against the real ncsnpp-celebahq-256
    checkpoint."""
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        # Generate one 256x256 sample and compare a corner slice to a
        # pre-recorded reference.
        _lowercase = 'google/ncsnpp-celebahq-256'
        _lowercase = UNetaDModel.from_pretrained(__A )
        _lowercase = KarrasVeScheduler()
        _lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
        pipe.to(__A )
        pipe.set_progress_bar_config(disable=__A )
        _lowercase = torch.manual_seed(0 )
        _lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
        _lowercase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        _lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( UpperCAmelCase , unittest.TestCase ):
    """RoBERTa tokenizer tests (slow + fast backends): toy-BPE vocab handling,
    special-token behaviour, and offset-mapping semantics of `add_prefix_space`
    / `trim_offsets`.

    NOTE(review): local names in this file look mangled by an automated rewrite
    (assignments all target `_lowercase` while later lines read `tokenizer`,
    `tokens`, `encoded`, ...) — confirm against the upstream source.
    """

    SCREAMING_SNAKE_CASE_ : Any = RobertaTokenizer
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = RobertaTokenizerFast
    SCREAMING_SNAKE_CASE_ : int = True
    SCREAMING_SNAKE_CASE_ : Any = {'''cls_token''': '''<s>'''}

    def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
        # Writes a tiny BPE vocab + merges file into tmpdirname for the tests.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _lowercase = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        _lowercase = dict(zip(__A ,range(len(__A ) ) ) )
        _lowercase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        _lowercase = {'unk_token': '<unk>'}
        _lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        _lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(__A ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(__A ) )

    def __UpperCAmelCase ( self : List[str] ,**__A : List[Any] ) -> Union[str, Any]:
        # Slow-tokenizer factory bound to the tmp vocab written in setUp.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__A )

    def __UpperCAmelCase ( self : Union[str, Any] ,**__A : Any ) -> str:
        # Fast (Rust) tokenizer factory bound to the same tmp vocab.
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**__A )

    def __UpperCAmelCase ( self : str ,__A : List[str] ) -> List[Any]:
        # Input/output pair used by the shared mixin tests.
        _lowercase = 'lower newer'
        _lowercase = 'lower newer'
        return input_text, output_text

    def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
        # Tokenization against the toy vocab: token strings and ids.
        _lowercase = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        _lowercase = 'lower newer'
        _lowercase = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        _lowercase = tokenizer.tokenize(__A ) # , add_prefix_space=True)
        self.assertListEqual(__A ,__A )
        _lowercase = tokens + [tokenizer.unk_token]
        _lowercase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) ,__A )

    def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
        # Hard-coded ids for known sentences (regression check on the encoder).
        _lowercase = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=__A ) ,[0, 3_1414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' ,add_special_tokens=__A ) ,[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] ,)

    @slow
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        # build_inputs_with_special_tokens must agree with encode(add_special_tokens=...).
        _lowercase = self.tokenizer_class.from_pretrained('roberta-base' )
        _lowercase = tokenizer.encode('sequence builders' ,add_special_tokens=__A )
        _lowercase = tokenizer.encode('multi-sequence build' ,add_special_tokens=__A )
        _lowercase = tokenizer.encode(
            'sequence builders' ,add_special_tokens=__A ,add_prefix_space=__A )
        _lowercase = tokenizer.encode(
            'sequence builders' ,'multi-sequence build' ,add_special_tokens=__A ,add_prefix_space=__A )
        _lowercase = tokenizer.build_inputs_with_special_tokens(__A )
        _lowercase = tokenizer.build_inputs_with_special_tokens(__A ,__A )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
        # Space handling around special tokens (prefix space, <mask> lstrip).
        _lowercase = self.get_tokenizer()
        _lowercase = 'Encode this sequence.'
        _lowercase = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        _lowercase = tokenizer.encode(__A ,add_special_tokens=__A ,add_prefix_space=__A )
        _lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(__A ,__A )
        _lowercase = tokenizer.encode(__A ,add_special_tokens=__A ,add_prefix_space=__A )
        _lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(__A ,__A )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
        _lowercase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(__A ,__A )
        # Testing spaces after special tokens
        _lowercase = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(__A ,lstrip=__A ,rstrip=__A )} ) # mask token has a left space
        _lowercase = tokenizer.convert_tokens_to_ids(__A )
        _lowercase = 'Encode <mask> sequence'
        _lowercase = 'Encode <mask>sequence'
        _lowercase = tokenizer.encode(__A )
        _lowercase = encoded.index(__A )
        _lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(__A ,__A )
        _lowercase = tokenizer.encode(__A )
        _lowercase = encoded.index(__A )
        _lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(__A ,__A )

    def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        # Intentionally disabled mixin test.
        pass

    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        # Python vs Rust backend parity on a sentence containing <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _lowercase = self.rust_tokenizer_class.from_pretrained(__A ,**__A )
                _lowercase = self.tokenizer_class.from_pretrained(__A ,**__A )
                _lowercase = 'A, <mask> AllenNLP sentence.'
                _lowercase = tokenizer_r.encode_plus(__A ,add_special_tokens=__A ,return_token_type_ids=__A )
                _lowercase = tokenizer_p.encode_plus(__A ,add_special_tokens=__A ,return_token_type_ids=__A )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
                _lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                _lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    __A ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    __A ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )

    def __UpperCAmelCase ( self : int ) -> Any:
        # add_prefix_space / trim_offsets must round-trip through serialization.
        for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
            _lowercase = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A )
            _lowercase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            _lowercase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,__A )
            self.assertEqual(post_processor_state['add_prefix_space'] ,__A )
            self.assertEqual(post_processor_state['trim_offsets'] ,__A )

    def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _lowercase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                _lowercase = F"""{text_of_1_token} {text_of_1_token}"""
                _lowercase = self.rust_tokenizer_class.from_pretrained(
                    __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A )
                _lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
                self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(len(__A ) + 1, len(__A ) + 1 + len(__A )) ,)
                _lowercase = self.rust_tokenizer_class.from_pretrained(
                    __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A )
                _lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
                self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(len(__A ) + 1, len(__A ) + 1 + len(__A )) ,)
                _lowercase = self.rust_tokenizer_class.from_pretrained(
                    __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A )
                _lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
                self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(len(__A ), len(__A ) + 1 + len(__A )) ,)
                _lowercase = self.rust_tokenizer_class.from_pretrained(
                    __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A )
                _lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
                self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(len(__A ), len(__A ) + 1 + len(__A )) ,)
                _lowercase = F""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                _lowercase = self.rust_tokenizer_class.from_pretrained(
                    __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A )
                _lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
                self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(__A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) ,)
                _lowercase = self.rust_tokenizer_class.from_pretrained(
                    __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A )
                _lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
                self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(__A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) ,)
                _lowercase = self.rust_tokenizer_class.from_pretrained(
                    __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A )
                _lowercase = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A )
                self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(__A )) )
                self.assertEqual(
                    encoding.offset_mapping[1] ,(1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) ,)
def SCREAMING_SNAKE_CASE__ (s: str, pattern: str) -> list:
    """Naive substring search.

    Checks every alignment of ``pattern`` against ``s`` and returns the list of
    0-based start indices where the pattern occurs, overlaps included, in
    increasing order. Runs in O(len(s) * len(pattern)) time.

    :param s: text to search in.
    :param pattern: pattern to look for. An empty pattern matches at every
        position 0 .. len(s), mirroring the naive algorithm's behaviour.
    :return: list of matching start positions.
    """
    # Bug fix: the previous revision declared both parameters under the same
    # name (a SyntaxError) and its body read undefined locals (`pat_len`, `s`,
    # `pattern`, `match_found`, `position`).
    pat_len = len(pattern)
    positions = []
    for start in range(len(s) - pat_len + 1):
        # Slice comparison replaces the inner character loop: same result,
        # and the comparison itself runs in C.
        if s[start : start + pat_len] == pattern:
            positions.append(start)
    return positions


if __name__ == "__main__":
    # Bug fix: this guard used to call `naive_pattern_search`, a name that does
    # not exist in this file; it now calls the function defined above.
    assert SCREAMING_SNAKE_CASE__("""ABCDEFG""", """DE""") == [3]
    print(SCREAMING_SNAKE_CASE__("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A_ ( UpperCAmelCase , unittest.TestCase ):
    """CANINE tokenizer tests: character-level encoding, special-token handling
    (code points in the private-use area 0xE000+), save/load round-trips, and
    tokenizer attribute setters.

    NOTE(review): local names in this file look mangled by an automated rewrite
    (assignments all target `_lowercase` while later lines read `tokenizer`,
    `batch`, `encoded`, ...) — confirm against the upstream source.
    """

    SCREAMING_SNAKE_CASE_ : Any = CanineTokenizer
    SCREAMING_SNAKE_CASE_ : Optional[Any] = False

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        # Save a fresh CANINE tokenizer into tmpdirname for from_pretrained tests.
        super().setUp()
        _lowercase = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def __UpperCAmelCase ( self : List[str] ) -> int:
        # Reference pretrained tokenizer used by the integration tests below.
        return CanineTokenizer.from_pretrained('google/canine-s' )

    def __UpperCAmelCase ( self : Any ,**__A : List[Any] ) -> CanineTokenizer:
        _lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname ,**__A )
        _lowercase = 1024
        return tokenizer

    @require_torch
    def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
        # Batched encoding with padding; ids are raw code points framed by
        # CLS (0xE000=57344) and SEP (0xE001=57345).
        _lowercase = self.canine_tokenizer
        _lowercase = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
        # fmt: off
        _lowercase = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
        # fmt: on
        _lowercase = tokenizer(__A ,padding=__A ,return_tensors='pt' )
        self.assertIsInstance(__A ,__A )
        _lowercase = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(__A ,__A )
        self.assertEqual((2, 39) ,batch.input_ids.shape )
        self.assertEqual((2, 39) ,batch.attention_mask.shape )

    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        # The encoder must emit all three standard fields.
        _lowercase = self.canine_tokenizer
        _lowercase = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
        _lowercase = tokenizer(__A ,padding=__A ,return_tensors='pt' )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('input_ids' ,__A )
        self.assertIn('attention_mask' ,__A )
        self.assertIn('token_type_ids' ,__A )

    @require_torch
    def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # Target-side encoding honours max_length padding.
        _lowercase = self.canine_tokenizer
        _lowercase = [
            'What\'s the weater?',
            'It\'s about 25 degrees.',
        ]
        _lowercase = tokenizer(
            text_target=__A ,max_length=32 ,padding='max_length' ,truncation=__A ,return_tensors='pt' )
        self.assertEqual(32 ,targets['input_ids'].shape[1] )

    def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
        # save_pretrained/from_pretrained round-trip, with and without an
        # added special token and a model_max_length override.
        # safety check on max_len default value so we are sure the test works
        _lowercase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length ,42 )
        # Now let's start the test
        _lowercase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowercase = tempfile.mkdtemp()
                _lowercase = ' He is very happy, UNwant\u00E9d,running'
                _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
                tokenizer.save_pretrained(__A )
                _lowercase = tokenizer.__class__.from_pretrained(__A )
                _lowercase = after_tokenizer.encode(__A ,add_special_tokens=__A )
                self.assertListEqual(__A ,__A )
                shutil.rmtree(__A )
        _lowercase = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                _lowercase = tempfile.mkdtemp()
                _lowercase = ' He is very happy, UNwant\u00E9d,running'
                _lowercase = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                _lowercase = chr(0xE007 )
                additional_special_tokens.append(__A )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
                tokenizer.save_pretrained(__A )
                _lowercase = tokenizer.__class__.from_pretrained(__A )
                _lowercase = after_tokenizer.encode(__A ,add_special_tokens=__A )
                self.assertListEqual(__A ,__A )
                self.assertIn(__A ,after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length ,42 )
                _lowercase = tokenizer.__class__.from_pretrained(__A ,model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length ,43 )
                shutil.rmtree(__A )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        # Adding a cls_token, encoding/decoding it, and skipping it on decode.
        _lowercase = self.get_tokenizers(do_lower_case=__A )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowercase , _lowercase = self.get_clean_sequence(__A )
                # a special token for Canine can be defined as follows:
                _lowercase = 0xE005
                _lowercase = chr(__A )
                tokenizer.add_special_tokens({'cls_token': special_token} )
                _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
                self.assertEqual(len(__A ) ,1 )
                _lowercase = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=__A )
                _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
                _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
                _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
                self.assertEqual(__A ,input_encoded + special_token_id )
                _lowercase = tokenizer.decode(__A ,skip_special_tokens=__A )
                self.assertTrue(special_token not in decoded )

    def __UpperCAmelCase ( self : List[str] ) -> Dict:
        # add_tokens vs add_special_tokens must both yield single-token splits.
        _lowercase = self.get_tokenizers(do_lower_case=__A )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowercase = chr(0xE005 )
                _lowercase = chr(0xE006 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=__A )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
                _lowercase = tokenizer.tokenize(__A )
                _lowercase = tokenizer.tokenize(__A )
                self.assertEqual(len(__A ) ,1 )
                self.assertEqual(len(__A ) ,1 )
                self.assertEqual(token_a[0] ,__A )
                self.assertEqual(token_a[0] ,__A )

    @require_tokenizers
    def __UpperCAmelCase ( self : str ) -> List[str]:
        # An AddedToken special token must survive save_pretrained/from_pretrained.
        _lowercase = self.get_tokenizers(do_lower_case=__A )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # a special token for Canine can be defined as follows:
                _lowercase = 0xE006
                _lowercase = chr(__A )
                _lowercase = AddedToken(__A ,lstrip=__A )
                tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(__A )
                    tokenizer.from_pretrained(__A )

    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        # Hand-editing special_tokens_map.json / tokenizer_config.json on disk
        # must be reflected by from_pretrained (and overridable via kwargs).
        _lowercase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__A )
                with open(os.path.join(__A ,'special_tokens_map.json' ) ,encoding='utf-8' ) as json_file:
                    _lowercase = json.load(__A )
                with open(os.path.join(__A ,'tokenizer_config.json' ) ,encoding='utf-8' ) as json_file:
                    _lowercase = json.load(__A )
                # a special token for Canine can be defined as follows:
                _lowercase = 0xE006
                _lowercase = chr(__A )
                _lowercase = [new_token_a]
                _lowercase = [new_token_a]
                with open(os.path.join(__A ,'special_tokens_map.json' ) ,'w' ,encoding='utf-8' ) as outfile:
                    json.dump(__A ,__A )
                with open(os.path.join(__A ,'tokenizer_config.json' ) ,'w' ,encoding='utf-8' ) as outfile:
                    json.dump(__A ,__A )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                _lowercase = tokenizer_class.from_pretrained(__A ,extra_ids=0 )
                self.assertIn(__A ,tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
                _lowercase = 0xE007
                _lowercase = chr(__A )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                _lowercase = [AddedToken(__A ,lstrip=__A )]
                _lowercase = tokenizer_class.from_pretrained(
                    __A ,additional_special_tokens=__A ,extra_ids=0 )
                self.assertIn(__A ,tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )

    @require_tokenizers
    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        # decode() with spaces_between_special_tokens should reproduce the input.
        _lowercase = self.get_tokenizers(do_lower_case=__A )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowercase = 'hello world'
                if self.space_between_special_tokens:
                    _lowercase = '[CLS] hello world [SEP]'
                else:
                    _lowercase = input
                _lowercase = tokenizer.encode(__A ,add_special_tokens=__A )
                _lowercase = tokenizer.decode(__A ,spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(__A ,[output, output.lower()] )

    def __UpperCAmelCase ( self : List[str] ) -> int:
        # Setting each special-token *_id attribute must update both views.
        _lowercase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                _lowercase = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                _lowercase = 'a'
                _lowercase = ord(__A )
                for attr in attributes_list:
                    setattr(__A ,attr + '_id' ,__A )
                    self.assertEqual(getattr(__A ,__A ) ,__A )
                    self.assertEqual(getattr(__A ,attr + '_id' ) ,__A )
                    setattr(__A ,attr + '_id' ,__A )
                    self.assertEqual(getattr(__A ,__A ) ,__A )
                    self.assertEqual(getattr(__A ,attr + '_id' ) ,__A )
                setattr(__A ,'additional_special_tokens_ids' ,[] )
                self.assertListEqual(getattr(__A ,'additional_special_tokens' ) ,[] )
                self.assertListEqual(getattr(__A ,'additional_special_tokens_ids' ) ,[] )
                _lowercase = 0xE006
                _lowercase = chr(__A )
                setattr(__A ,'additional_special_tokens_ids' ,[additional_special_token_id] )
                self.assertListEqual(getattr(__A ,'additional_special_tokens' ) ,[additional_special_token] )
                self.assertListEqual(getattr(__A ,'additional_special_tokens_ids' ) ,[additional_special_token_id] )

    # The remaining mixin hooks do not apply to a vocabulary-free,
    # character-level tokenizer and are intentionally disabled.
    def __UpperCAmelCase ( self : Any ) -> Any:
        pass

    def __UpperCAmelCase ( self : Any ) -> Tuple:
        pass

    def __UpperCAmelCase ( self : List[str] ) -> List[str]:
        pass

    def __UpperCAmelCase ( self : int ) -> str:
        pass

    def __UpperCAmelCase ( self : Tuple ) -> int:
        pass

    def __UpperCAmelCase ( self : str ) -> Tuple:
        pass

    def __UpperCAmelCase ( self : Dict ) -> List[Any]:
        pass

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
        pass
from typing import Any
import numpy as np
def SCREAMING_SNAKE_CASE__ (matrix: np.ndarray) -> bool:
    """Return True iff ``matrix`` is Hermitian, i.e. equal to its own
    conjugate transpose.

    Bug fix: the previous revision's parameter name did not match the body,
    which read an undefined global ``matrix``.
    """
    return np.array_equal(matrix, matrix.conjugate().T)
def SCREAMING_SNAKE_CASE__ (a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* a v) / (v* v) for matrix ``a`` and
    column vector ``v``.

    For a Hermitian ``a`` the quotient is real and bounded by the smallest and
    largest eigenvalues of ``a``. The result keeps the shape produced by the
    dot products (a 1x1 array for a column vector ``v``).

    Bug fix: the previous revision declared both parameters under a single
    duplicate name (a SyntaxError) and its body read undefined locals
    (``v``, ``v_star``, ``v_star_dot``).
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    # Kept as an assert to preserve the original failure mode; note asserts
    # are stripped under `python -O`.
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Self-test: checks the Rayleigh quotient of a complex Hermitian matrix
    and of a real symmetric matrix (whose quotient at v=[1,2,3] is exactly 3)."""
    # NOTE(review): this body reads `is_hermitian`, `rayleigh_quotient` and `a`,
    # none of which exist under those names in this file (the helpers above are
    # both named `SCREAMING_SNAKE_CASE__` and every local is bound to
    # `_lowercase`) — the names look mangled by an automated rewrite; as written
    # this function raises NameError. Confirm against the upstream source.
    _lowercase = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
    _lowercase = np.array([[1], [2], [3]] )
    assert is_hermitian(snake_case__ ), F"""{a} is not hermitian."""
    print(rayleigh_quotient(snake_case__ , snake_case__ ) )
    _lowercase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(snake_case__ ), F"""{a} is not hermitian."""
    assert rayleigh_quotient(snake_case__ , snake_case__ ) == float(3 )
if __name__ == "__main__":
    # Run the module doctests, then the explicit self-tests.
    import doctest

    doctest.testmod()
    # NOTE(review): `tests` is not defined under that name in this file (the
    # self-test function above is named `SCREAMING_SNAKE_CASE__`), so this call
    # raises NameError as written — names look mangled; confirm upstream.
    tests()
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[Any] , snake_case__ :Optional[Any]=0.999 , snake_case__ :Tuple="cosine" , ) -> Optional[Any]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ :List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ :Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowercase = []
for i in range(snake_case__ ):
_lowercase = i / num_diffusion_timesteps
_lowercase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
class A_ ( UpperCAmelCase , UpperCAmelCase ):
    """KDPM2-style discrete scheduler: a second-order (Heun-like) sampler over
    an interpolated sigma schedule. Each output step alternates between a
    first-order stage and a second-order correction stage (see
    `state_in_first_order`), so the sampler needs two model evaluations per
    effective step.

    NOTE(review): local names in this file look mangled by an automated rewrite
    (every assignment targets `_lowercase` while later lines read `sigmas`,
    `timesteps`, `betas`, ...), and `torch.floataa` is not a torch dtype
    (upstream uses `torch.float32`). Confirm against the upstream source
    before relying on this copy.
    """

    # Scheduler names this scheduler is interchangeable with.
    SCREAMING_SNAKE_CASE_ : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
    # Solver order: 2.
    SCREAMING_SNAKE_CASE_ : Dict = 2

    @register_to_config
    def __init__( self : Union[str, Any] ,__A : int = 1000 ,__A : float = 0.00085 ,__A : float = 0.012 ,__A : str = "linear" ,__A : Optional[Union[np.ndarray, List[float]]] = None ,__A : str = "epsilon" ,__A : str = "linspace" ,__A : int = 0 ,) -> Union[str, Any]:
        # Build the beta schedule, derive cumulative alphas, then initialise
        # timesteps/sigmas via set_timesteps.
        if trained_betas is not None:
            _lowercase = torch.tensor(__A ,dtype=torch.floataa )
        elif beta_schedule == "linear":
            _lowercase = torch.linspace(__A ,__A ,__A ,dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            _lowercase = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,__A ,dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            _lowercase = betas_for_alpha_bar(__A )
        else:
            raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
        _lowercase = 1.0 - self.betas
        _lowercase = torch.cumprod(self.alphas ,dim=0 )
        # set all values
        self.set_timesteps(__A ,__A ,__A )

    def __UpperCAmelCase ( self : str ,__A : List[str] ,__A : Any=None ) -> str:
        # Map a timestep value to its index in the (possibly duplicated)
        # interleaved timestep schedule.
        if schedule_timesteps is None:
            _lowercase = self.timesteps
        _lowercase = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            _lowercase = 1 if len(__A ) > 1 else 0
        else:
            _lowercase = timestep.cpu().item() if torch.is_tensor(__A ) else timestep
            _lowercase = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def __UpperCAmelCase ( self : int ) -> Tuple:
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def __UpperCAmelCase ( self : List[str] ,__A : torch.FloatTensor ,__A : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
        # Scale the model input by 1/sqrt(sigma^2 + 1); the sigma used depends
        # on whether we are in the first- or second-order stage.
        _lowercase = self.index_for_timestep(__A )
        if self.state_in_first_order:
            _lowercase = self.sigmas[step_index]
        else:
            _lowercase = self.sigmas_interpol[step_index]
        _lowercase = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def __UpperCAmelCase ( self : Tuple ,__A : int ,__A : Union[str, torch.device] = None ,__A : Optional[int] = None ,) -> str:
        # Precompute the sigma schedule, its log-space midpoint interpolation,
        # and the interleaved timestep sequence used by the two-stage stepping.
        _lowercase = num_inference_steps
        _lowercase = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            _lowercase = np.linspace(0 ,num_train_timesteps - 1 ,__A ,dtype=__A )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            _lowercase = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            _lowercase = (np.arange(0 ,__A ) * step_ratio).round()[::-1].copy().astype(__A )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            _lowercase = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            _lowercase = (np.arange(__A ,0 ,-step_ratio )).round().copy().astype(__A )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        _lowercase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        _lowercase = torch.from_numpy(np.log(__A ) ).to(__A )
        _lowercase = np.interp(__A ,np.arange(0 ,len(__A ) ) ,__A )
        _lowercase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        _lowercase = torch.from_numpy(__A ).to(device=__A )
        # interpolate sigmas
        _lowercase = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
        _lowercase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        _lowercase = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(__A ).startswith('mps' ):
            # mps does not support float64
            _lowercase = torch.from_numpy(__A ).to(__A ,dtype=torch.floataa )
        else:
            _lowercase = torch.from_numpy(__A ).to(__A )
        # interpolate timesteps
        _lowercase = self.sigma_to_t(__A ).to(__A ,dtype=timesteps.dtype )
        _lowercase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
        _lowercase = torch.cat([timesteps[:1], interleaved_timesteps] )
        _lowercase = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        _lowercase = defaultdict(__A )

    def __UpperCAmelCase ( self : Optional[Any] ,__A : str ) -> List[str]:
        # Invert the sigma schedule: map a sigma value to a fractional
        # (interpolated) timestep via the precomputed log-sigma table.
        # get log sigma
        _lowercase = sigma.log()
        # get distribution
        _lowercase = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        _lowercase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        _lowercase = low_idx + 1
        _lowercase = self.log_sigmas[low_idx]
        _lowercase = self.log_sigmas[high_idx]
        # interpolate sigmas
        _lowercase = (low - log_sigma) / (low - high)
        _lowercase = w.clamp(0 ,1 )
        # transform interpolation to time range
        _lowercase = (1 - w) * low_idx + w * high_idx
        _lowercase = t.view(sigma.shape )
        return t

    @property
    def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
        # True while no first-order result is buffered (self.sample is None).
        return self.sample is None

    def __UpperCAmelCase ( self : Dict ,__A : Union[torch.FloatTensor, np.ndarray] ,__A : Union[float, torch.FloatTensor] ,__A : Union[torch.FloatTensor, np.ndarray] ,__A : bool = True ,) -> Union[SchedulerOutput, Tuple]:
        # One sampler step. First call per timestep does the first-order stage
        # (buffering `sample`); the second call applies the 2nd-order update.
        _lowercase = self.index_for_timestep(__A )
        # advance index counter by 1
        _lowercase = timestep.cpu().item() if torch.is_tensor(__A ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            _lowercase = self.sigmas[step_index]
            _lowercase = self.sigmas_interpol[step_index + 1]
            _lowercase = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            _lowercase = self.sigmas[step_index - 1]
            _lowercase = self.sigmas_interpol[step_index]
            _lowercase = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        _lowercase = 0
        _lowercase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            _lowercase = sigma_hat if self.state_in_first_order else sigma_interpol
            _lowercase = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            _lowercase = sigma_hat if self.state_in_first_order else sigma_interpol
            _lowercase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('prediction_type not implemented yet: sample' )
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            _lowercase = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            _lowercase = sigma_interpol - sigma_hat
            # store for 2nd order step
            _lowercase = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            _lowercase = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            _lowercase = sigma_next - sigma_hat
            _lowercase = self.sample
            _lowercase = None
        _lowercase = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=__A )

    def __UpperCAmelCase ( self : List[Any] ,__A : torch.FloatTensor ,__A : torch.FloatTensor ,__A : torch.FloatTensor ,) -> torch.FloatTensor:
        # Forward-diffuse clean samples to the noise level of the given timesteps.
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        _lowercase = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(__A ):
            # mps does not support float64
            _lowercase = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
            _lowercase = timesteps.to(original_samples.device ,dtype=torch.floataa )
        else:
            _lowercase = self.timesteps.to(original_samples.device )
            _lowercase = timesteps.to(original_samples.device )
        _lowercase = [self.index_for_timestep(__A ,__A ) for t in timesteps]
        _lowercase = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            _lowercase = sigma.unsqueeze(-1 )
        _lowercase = original_samples + noise * sigma
        return noisy_samples

    def __len__( self : str ) -> Tuple:
        # Length of the scheduler == number of training timesteps.
        return self.config.num_train_timesteps
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
    """Fixture holding DETA image-processor settings and computing the expected
    post-resize image sizes used by the processor tests below.

    Fixes in this revision:
    - ``__init__`` declared every parameter as ``__A`` (duplicate argument names
      are a SyntaxError); the names implied by the attribute reads are restored.
    - both helper methods were named ``__UpperCAmelCase`` (the second silently
      shadowed the first); they are renamed to ``prepare_image_processor_dict``
      and ``get_expected_values`` — the names the test class below actually calls.
    - list defaults for ``image_mean`` / ``image_std`` are created per instance
      to avoid shared mutable defaults.
    - a ``DetaImageProcessingTester`` alias is added because the test class
      instantiates the fixture under that name.
    """

    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,min_resolution=30 ,max_resolution=400 ,do_resize=True ,size=None ,do_normalize=True ,image_mean=None ,image_std=None ,do_rescale=True ,rescale_factor=1 / 255 ,do_pad=True ,) -> None:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        self.size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to instantiate a DetaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values( self ,image_inputs ,batched=False ):
        """Compute the (height, width) the processor is expected to resize to.

        For a single image the shorter side is scaled to ``size['shortest_edge']``
        keeping the aspect ratio; for a batch, the per-dimension maxima over the
        batch are returned (images are padded up to the largest height/width).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image ,Image.Image ):
                # PIL reports (width, height)
                w, h = image.size
            else:
                # arrays/tensors are channel-first: (C, H, W)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values ,key=lambda item : item[0] )[0]
            expected_width = max(expected_values ,key=lambda item : item[1] )[1]
        return expected_height, expected_width


# Alias matching the name the test class below uses to instantiate this fixture.
DetaImageProcessingTester = A_
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
    """Tests for the DETA image processor: property presence, size handling,
    PIL/numpy/torch inputs, and slow integration checks against COCO fixtures.

    NOTE(review): throughout this class every local assignment uses the
    placeholder name ``_lowercase`` and many call arguments are the bare
    placeholder ``__A``, while later statements read the real names
    (``image_processor``, ``image_inputs``, ``encoded_images``, ``encoding``,
    ...). The methods cannot run as written; confirm against the original
    test before relying on any of them.
    """

    # NOTE(review): the base-class name ``UpperCAmelCase`` is unresolved here —
    # presumably the shared ImageProcessingSavingTestMixin imported above.
    SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
    # setUp: build the shared tester fixture.
    def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
        _lowercase = DetaImageProcessingTester(self )
    # Expose the tester's kwargs dict as a property.
    @property
    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor instance should expose all configuration attributes.
    def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__A ,'image_mean' ) )
        self.assertTrue(hasattr(__A ,'image_std' ) )
        self.assertTrue(hasattr(__A ,'do_normalize' ) )
        self.assertTrue(hasattr(__A ,'do_resize' ) )
        self.assertTrue(hasattr(__A ,'do_rescale' ) )
        self.assertTrue(hasattr(__A ,'do_pad' ) )
        self.assertTrue(hasattr(__A ,'size' ) )
    # from_dict should round-trip size and do_pad.
    def __UpperCAmelCase ( self : str ) -> List[str]:
        _lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
        # NOTE(review): ``__A`` here presumably stands for True — confirm.
        self.assertEqual(image_processor.do_pad ,__A )
    # Intentionally skipped test slot.
    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        pass
    # PIL input: single image and batched shapes match the expected resize.
    def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # Initialize image_processing
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
        for image in image_inputs:
            self.assertIsInstance(__A ,Image.Image )
        # Test not batched input
        _lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        _lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        _lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
        _lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    # numpy input: same shape checks as the PIL variant.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        # Initialize image_processing
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A ,np.ndarray )
        # Test not batched input
        _lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        _lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        _lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
        _lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    # torch tensor input: same shape checks as the PIL variant.
    def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
        # Initialize image_processing
        _lowercase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A ,torch.Tensor )
        # Test not batched input
        _lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
        _lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        _lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
        _lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    # Slow integration: COCO detection annotations produce the expected
    # pixel values, areas, boxes, ids, labels and sizes.
    @slow
    def __UpperCAmelCase ( self : List[str] ) -> List[str]:
        # prepare image and target
        _lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
            _lowercase = json.loads(f.read() )
        _lowercase = {'image_id': 3_9769, 'annotations': target}
        # encode them
        _lowercase = DetaImageProcessor()
        _lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
        # verify pixel values
        _lowercase = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape ,__A )
        _lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
        # verify area
        _lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
        # verify boxes
        _lowercase = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
        _lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
        # verify image_id
        _lowercase = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
        # verify is_crowd
        _lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
        # verify class_labels
        _lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
        # verify orig_size
        _lowercase = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
        # verify size
        _lowercase = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
    # Slow integration: COCO panoptic annotations additionally verify masks.
    @slow
    def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        # prepare image, target and masks_path
        _lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
            _lowercase = json.loads(f.read() )
        _lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
        _lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        _lowercase = DetaImageProcessor(format='coco_panoptic' )
        _lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
        # verify pixel values
        _lowercase = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['pixel_values'].shape ,__A )
        _lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
        # verify area
        _lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
        # verify boxes
        _lowercase = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
        _lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
        # verify image_id
        _lowercase = torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
        # verify is_crowd
        _lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
        # verify class_labels
        _lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
        # verify masks
        _lowercase = 82_2873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
        # verify orig_size
        _lowercase = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
        # verify size
        _lowercase = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A_ ( UpperCAmelCase ):
    """Tests for EulerDiscreteScheduler: config sweeps plus full denoising loops
    checking the sum/mean of the final sample against reference values.

    NOTE(review): local assignments use the placeholder ``_lowercase`` while
    later statements read the real names (``config``, ``scheduler``, ``sample``,
    ``result_sum``, ...); the methods cannot run as written. The base-class name
    ``UpperCAmelCase`` presumably stands for the imported SchedulerCommonTest.
    """

    # NOTE(review): both class attributes share one name, so the second
    # assignment overwrites the first — presumably ``scheduler_classes`` and
    # ``num_inference_steps`` in the original; confirm.
    SCREAMING_SNAKE_CASE_ : Optional[Any] = (EulerDiscreteScheduler,)
    SCREAMING_SNAKE_CASE_ : Dict = 1_0
    # Build the default scheduler config, allowing keyword overrides.
    def __UpperCAmelCase ( self : Tuple ,**__A : Tuple ) -> List[str]:
        _lowercase = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**__A )
        return config
    # Sweep num_train_timesteps values.
    def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__A )
    # Sweep matched beta_start/beta_end pairs.
    def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=__A ,beta_end=__A )
    # Sweep beta schedules.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__A )
    # Sweep prediction types.
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__A )
    # Full denoising loop (epsilon prediction); checks sum/mean of the result.
    def __UpperCAmelCase ( self : str ) -> int:
        _lowercase = self.scheduler_classes[0]
        _lowercase = self.get_scheduler_config()
        _lowercase = scheduler_class(**__A )
        scheduler.set_timesteps(self.num_inference_steps )
        _lowercase = torch.manual_seed(0 )
        _lowercase = self.dummy_model()
        _lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowercase = sample.to(__A )
        for i, t in enumerate(scheduler.timesteps ):
            _lowercase = scheduler.scale_model_input(__A ,__A )
            _lowercase = model(__A ,__A )
            _lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
            _lowercase = output.prev_sample
        _lowercase = torch.sum(torch.abs(__A ) )
        _lowercase = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    # Full denoising loop with v_prediction.
    def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
        _lowercase = self.scheduler_classes[0]
        _lowercase = self.get_scheduler_config(prediction_type='v_prediction' )
        _lowercase = scheduler_class(**__A )
        scheduler.set_timesteps(self.num_inference_steps )
        _lowercase = torch.manual_seed(0 )
        _lowercase = self.dummy_model()
        _lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma
        _lowercase = sample.to(__A )
        for i, t in enumerate(scheduler.timesteps ):
            _lowercase = scheduler.scale_model_input(__A ,__A )
            _lowercase = model(__A ,__A )
            _lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
            _lowercase = output.prev_sample
        _lowercase = torch.sum(torch.abs(__A ) )
        _lowercase = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 0.0002 ) < 1e-2
        assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3
    # Same epsilon loop, but timesteps placed on the target device.
    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        _lowercase = self.scheduler_classes[0]
        _lowercase = self.get_scheduler_config()
        _lowercase = scheduler_class(**__A )
        scheduler.set_timesteps(self.num_inference_steps ,device=__A )
        _lowercase = torch.manual_seed(0 )
        _lowercase = self.dummy_model()
        _lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        _lowercase = sample.to(__A )
        for t in scheduler.timesteps:
            _lowercase = scheduler.scale_model_input(__A ,__A )
            _lowercase = model(__A ,__A )
            _lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
            _lowercase = output.prev_sample
        _lowercase = torch.sum(torch.abs(__A ) )
        _lowercase = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 10.0807 ) < 1e-2
        assert abs(result_mean.item() - 0.0131 ) < 1e-3
    # Loop with Karras sigma spacing enabled; different reference values.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        _lowercase = self.scheduler_classes[0]
        _lowercase = self.get_scheduler_config()
        _lowercase = scheduler_class(**__A ,use_karras_sigmas=__A )
        scheduler.set_timesteps(self.num_inference_steps ,device=__A )
        _lowercase = torch.manual_seed(0 )
        _lowercase = self.dummy_model()
        _lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        _lowercase = sample.to(__A )
        for t in scheduler.timesteps:
            _lowercase = scheduler.scale_model_input(__A ,__A )
            _lowercase = model(__A ,__A )
            _lowercase = scheduler.step(__A ,__A ,__A ,generator=__A )
            _lowercase = output.prev_sample
        _lowercase = torch.sum(torch.abs(__A ) )
        _lowercase = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# CLI training needs at least one deep-learning backend installed.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
# NOTE(review): two distinct flags (presumably USE_XLA / USE_AMP) were both
# renamed to `snake_case`, so the second assignment overwrites the first —
# confirm the intended flag names before relying on either.
snake_case = False
snake_case = False
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Namespace ) -> Tuple:
    """Factory registered on the argument parser: build the train command from parsed args.

    NOTE(review): ``TrainCommand`` is not defined in this module (the command
    class below was renamed to ``A_``), so calling this as written raises
    NameError — confirm the intended class name.
    """
    return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
    """`transformers-cli train` command: registers CLI arguments, loads train /
    validation CSV datasets, builds a text-classification pipeline and runs
    training on the available backend (TF or PyTorch).

    NOTE(review): assignments use the placeholder ``_lowercase`` while later
    statements read the real names (``train_parser``, ``self.logger``,
    ``self.pipeline``, ...), and many ``__A`` arguments stand in for literal
    values; the class cannot run as written — confirm against the original.
    """

    # Register the `train` sub-parser and all of its CLI options.
    @staticmethod
    def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
        _lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
        train_parser.add_argument(
            '--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
        train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
        train_parser.add_argument(
            '--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=__A )
    # Build the pipeline and load datasets from the parsed CLI namespace.
    def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
        _lowercase = logging.get_logger('transformers-cli/training' )
        # pick the backend: TF when available, otherwise PyTorch
        _lowercase = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output ,exist_ok=__A )
        _lowercase = args.output
        _lowercase = args.column_label
        _lowercase = args.column_text
        _lowercase = args.column_id
        self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
        # only text classification is implemented; other tasks raise
        if args.task == "text_classification":
            _lowercase = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F"""Loading dataset from {args.train_data}""" )
        _lowercase = Processor.create_from_csv(
            args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
        _lowercase = None
        if args.validation_data:
            self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
            _lowercase = Processor.create_from_csv(
                args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
        _lowercase = args.validation_split
        _lowercase = args.train_batch_size
        _lowercase = args.valid_batch_size
        _lowercase = args.learning_rate
        _lowercase = args.adam_epsilon
    # Dispatch to the backend-specific run method.
    def __UpperCAmelCase ( self : Optional[Any] ) -> str:
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    # TF training is not implemented.
    def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
        raise NotImplementedError
    # PyTorch training: fit the pipeline and save the result.
    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
        self.pipeline.fit(
            self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A_ ( nn.Module ):
    """Tiny Linear(3→4) → BatchNorm → Linear(4→5) network used as the fixture
    for the accelerate hook tests below.

    NOTE(review): the constructor assigns all three layers to the throwaway
    name ``_lowercase`` (so no module attribute is ever registered), both
    Linear layers appear to share the attribute name ``lineara`` that forward
    reads, and ``nn.BatchNormad`` is not a real torch module (presumably
    ``nn.BatchNorm1d``). The fixture cannot run as written; confirm the
    original layer names before fixing — the tests below read
    ``model.lineara`` / ``model.batchnorm`` directly.
    """
    def __init__( self : Tuple ) -> List[str]:
        super().__init__()
        _lowercase = nn.Linear(3 ,4 )
        _lowercase = nn.BatchNormad(4 )
        _lowercase = nn.Linear(4 ,5 )
    def __UpperCAmelCase ( self : int ,__A : Tuple ) -> int:
        # forward pass: first linear → batchnorm → second linear
        return self.lineara(self.batchnorm(self.lineara(__A ) ) )
class A_ ( UpperCAmelCase ):
    """Test hook that increments the first positional forward argument by one.

    Fix: the original declared ``__A`` as the positional, the ``*args`` and the
    ``**kwargs`` parameter simultaneously — duplicate argument names are a
    SyntaxError — while the body read ``args`` / ``kwargs``. Restored to the
    conventional pre-forward hook signature ``(module, *args, **kwargs)``.
    """

    def __UpperCAmelCase ( self : int ,module ,*args ,**kwargs ) -> Tuple:
        # Return (modified_args, kwargs): bump args[0], pass the rest through.
        return (args[0] + 1,) + args[1:], kwargs
class A_ ( UpperCAmelCase ):
    """Test hook that adds one to the module's forward output.

    Fix: both parameters were named ``__A`` (duplicate argument names are a
    SyntaxError) while the body read ``output``. Renamed to ``(module, output)``
    per the post-forward hook convention; ``module`` is unused here.
    """

    def __UpperCAmelCase ( self : str ,module ,output ) -> Optional[Any]:
        return output + 1
class A_ ( unittest.TestCase ):
"""simple docstring"""
    # Attaching a ModelHook sets `_hf_hook`/`_old_forward` while preserving
    # forward's name and signature; removing the hook restores the module.
    # NOTE(review): `_lowercase` placeholders and bare `__A` arguments stand in
    # for the real local names (test_model, hook) — the method cannot run as
    # written; confirm against the original accelerate test.
    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        _lowercase = ModelForTest()
        _lowercase = ModelHook()
        add_hook_to_module(__A ,__A )
        self.assertEqual(test_model._hf_hook ,__A )
        self.assertTrue(hasattr(__A ,'_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ ,'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,['x'] )
        remove_hook_from_module(__A )
        self.assertFalse(hasattr(__A ,'_hf_hook' ) )
        self.assertFalse(hasattr(__A ,'_old_forward' ) )
    # Adding a second hook with append=True wraps both in a SequentialHook;
    # one remove_hook_from_module call clears everything.
    def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
        _lowercase = ModelForTest()
        _lowercase = ModelHook()
        add_hook_to_module(__A ,__A )
        add_hook_to_module(__A ,__A ,append=__A )
        self.assertEqual(isinstance(test_model._hf_hook ,__A ) ,__A )
        self.assertEqual(len(test_model._hf_hook.hooks ) ,2 )
        self.assertTrue(hasattr(__A ,'_old_forward' ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ ,'forward' )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,['x'] )
        remove_hook_from_module(__A )
        self.assertFalse(hasattr(__A ,'_hf_hook' ) )
        self.assertFalse(hasattr(__A ,'_old_forward' ) )
    # A PreForwardHook shifts the input by 1 (model(x) == model_without_hook(x+1));
    # re-adding replaces rather than chains, and SequentialHook chains two shifts.
    def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
        _lowercase = ModelForTest()
        _lowercase = torch.randn(2 ,3 )
        _lowercase = test_model(x + 1 )
        _lowercase = test_model(x + 2 )
        _lowercase = PreForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,__A ,atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        _lowercase = PreForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,__A ,atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        _lowercase = SequentialHook(PreForwardHook() ,PreForwardHook() )
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        assert torch.allclose(__A ,__A ,atol=1e-5 )
    # A PostForwardHook adds 1 to the output; replacement vs. SequentialHook
    # chaining mirrors the pre-forward test above.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        _lowercase = ModelForTest()
        _lowercase = torch.randn(2 ,3 )
        _lowercase = test_model(__A )
        _lowercase = PostForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,output + 1 ,atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        _lowercase = PostForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,output + 1 ,atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        _lowercase = SequentialHook(PostForwardHook() ,PostForwardHook() )
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        assert torch.allclose(__A ,output + 2 ,atol=1e-5 )
    # Output keeps requires_grad by default; after flipping the hook's no_grad
    # flag the forward runs under torch.no_grad().
    # NOTE(review): the bare `_lowercase = True` presumably stands for
    # `test_model._hf_hook.no_grad = True` — confirm.
    def __UpperCAmelCase ( self : Any ) -> str:
        _lowercase = ModelForTest()
        _lowercase = torch.randn(2 ,3 )
        _lowercase = test_model(__A )
        _lowercase = PostForwardHook()
        add_hook_to_module(__A ,__A )
        _lowercase = test_model(__A )
        self.assertTrue(torch.allclose(__A ,output + 1 ) )
        self.assertTrue(outputa.requires_grad )
        _lowercase = True
        _lowercase = test_model(__A )
        self.assertFalse(outputa.requires_grad )
    # AlignDevicesHook moves each submodule to its own GPU; forward still works
    # with input on any device, and io_same_device pins the output to the
    # input's device.
    @require_multi_gpu
    def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
        _lowercase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm ,AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.lineara.weight.device ,torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device ,torch.device(0 ) )
        self.assertEqual(model.lineara.weight.device ,torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(__A ,AlignDevicesHook(io_same_device=__A ) )
        _lowercase = torch.randn(2 ,3 ).to(0 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,torch.device(0 ) )
    # With offload=True, parameters live on the meta device; buffers stay on the
    # execution device unless offload_buffers=True. Removing the hooks reloads
    # the real weights onto CPU.
    def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
        _lowercase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # This will move each submodule on different devices
        _lowercase = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
        add_hook_to_module(model.lineara ,AlignDevicesHook(**__A ) )
        add_hook_to_module(model.batchnorm ,AlignDevicesHook(**__A ) )
        add_hook_to_module(model.lineara ,AlignDevicesHook(**__A ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        _lowercase = torch.device(hook_kwargs['execution_device'] )
        self.assertEqual(model.batchnorm.running_mean.device ,__A )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # Now test with buffers included in the offload
        _lowercase = {
            'execution_device': 0 if torch.cuda.is_available() else 'cpu',
            'offload': True,
            'offload_buffers': True,
        }
        add_hook_to_module(model.lineara ,AlignDevicesHook(**__A ) )
        add_hook_to_module(model.batchnorm ,AlignDevicesHook(**__A ) )
        add_hook_to_module(model.lineara ,AlignDevicesHook(**__A ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
    # attach_align_device_hook applies the offload behaviour recursively to the
    # whole model; remove_hook_from_submodules undoes it. Buffer handling
    # mirrors the per-module test above.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
        _lowercase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # This will move each submodule on different devices
        _lowercase = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(__A ,execution_device=__A ,offload=__A )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        _lowercase = torch.device(__A )
        self.assertEqual(model.batchnorm.running_mean.device ,__A )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__A )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(__A ,execution_device=__A ,offload=__A ,offload_buffers=__A )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__A )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        """Exercise attach_align_device_hook with an explicit weights_map.

        Checks that parameters move to the meta device while offloaded, that
        buffers join the offload only when offload_buffers is requested, that a
        forward pass still produces output on the execution device, and that
        removing the hooks restores real CPU weights.

        NOTE(review): `__A` is passed as every argument value below (and inside
        a class body that name is mangled), and several `_lowercase = ...`
        bindings are read back under other names (`model`, `x`, `output`,
        `device`) — an automated rename appears to have clobbered the
        identifiers; confirm the intended constants before relying on this test.
        """
        _lowercase = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # This will move each submodule on different devices
        _lowercase = 0 if torch.cuda.is_available() else 'cpu'
        attach_align_device_hook(
            __A ,execution_device=__A ,offload=__A ,weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        # Buffers are not included in the offload by default, so are on the execution device
        _lowercase = torch.device(__A )
        self.assertEqual(model.batchnorm.running_mean.device ,__A )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__A )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            __A ,execution_device=__A ,offload=__A ,weights_map=model.state_dict() ,offload_buffers=__A ,)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
        self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
        _lowercase = torch.randn(2 ,3 )
        _lowercase = model(__A )
        self.assertEqual(output.device ,__A )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__A )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
        self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( checkpoint_url ):
    """Build the DPTConfig and expected output shape for *checkpoint_url*.

    Fixes two defects: the parameter was named ``snake_case__`` while the body
    read ``checkpoint_url`` (NameError), and the midas/nyu test was written as
    ``if "nyu" or "midas" in checkpoint_url`` — always true, because the
    non-empty literal ``"nyu"`` is truthy on its own.

    NOTE(review): the original body discarded every per-variant setting into a
    throwaway local; the config attribute targets below were reconstructed from
    the upstream conversion script — confirm against transformers' DPTConfig.
    """
    config = DPTConfig(embedding_type='hybrid' )
    # Default, so expected_shape is always bound once the midas/nyu test above
    # no longer matches every URL.
    expected_shape = (1, 384, 384)
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        # Download the ADE20k label mapping from the Hub dataset repo.
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :dict ) -> None:
    """Delete the timm classification-head tensors from *snake_case__* in place.

    Fixes the original body, which looped over an undefined name
    (``ignore_keys``) and called ``state_dict.pop`` on another undefined name;
    missing keys are tolerated via a ``None`` default.
    """
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        snake_case__.pop(k , None )
def SCREAMING_SNAKE_CASE__ ( name ):
    """Translate one key of the original DPT/timm state dict into its HF name.

    The substitutions are order-dependent (earlier rules rewrite prefixes that
    later rules then match against), so do not reorder them.

    Fixes the original version, which took an undefined parameter name and
    assigned every ``replace()`` result to a throwaway local — making the
    function a NameError/no-op; each branch now rebinds ``name``.
    """
    # ViT trunk: embeddings (cls_token/pos_embed/patch_embed) stay under
    # dpt.embeddings, everything else moves under dpt.encoder.
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , '' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt' )
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm' )
    if "head" in name:
        name = name.replace('head' , 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head' )
    if "backbone" in name:
        name = name.replace('backbone' , 'backbone.bit.encoder' )
    if ".." in name:
        name = name.replace('..' , '.' )
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution' , 'conv' )
    if "layer" in name and "backbone" in name:
        name = name.replace('layer' , 'layers' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
    if "embedder.conv" in name:
        name = name.replace('embedder.conv' , 'embedder.convolution' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
    return name
def SCREAMING_SNAKE_CASE__ ( state_dict , config ):
    """Split each fused timm qkv projection into separate HF q/k/v entries.

    Mutates *state_dict* in place: pops ``dpt.encoder.layer.{i}.attn.qkv.*``
    and writes ``dpt.encoder.layer.{i}.attention.attention.{query,key,value}.*``.

    Fixes two defects: both parameters shared one name (a SyntaxError), and the
    sliced q/k/v tensors were discarded into a throwaway local instead of being
    written back into the state dict.
    """
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ):
    """Fetch the standard COCO sanity-check image used after conversion.

    Fixes the original body, which passed an undefined name (``snake_case__``)
    to ``requests.get`` instead of the URL bound just above and ``stream=True``.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    """Convert an original DPT checkpoint at *checkpoint_url* to the HF format.

    Loads the raw state dict, renames/splits its keys, runs a sanity forward
    pass, optionally shows the prediction, saves to *pytorch_dump_folder_path*
    and optionally pushes to the Hub.

    NOTE(review): the original signature declared all five parameters with the
    same name (a SyntaxError) and the body read names that were never bound;
    both were reconstructed from the call site at the bottom of the file and
    the upstream conversion script.  *model_name* is accepted for interface
    compatibility but, as before, unused.
    """
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )
    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
    # CLI entry point for the DPT checkpoint conversion.
    # NOTE(review): the parser is bound to `snake_case` but used as `parser`,
    # the parsed args are bound to `snake_case` but read as `args`, and
    # `convert_dpt_checkpoint` is not defined under that name in this file —
    # the bindings look clobbered by an automated rename; confirm before running.
    snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
        type=str,
        help="""URL of the original DPT checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=False,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    parser.add_argument(
        """--model_name""",
        default="""dpt-large""",
        type=str,
        help="""Name of the model, in case you're pushing to the hub.""",
    )
    parser.add_argument(
        """--show_prediction""",
        action="""store_true""",
    )
    snake_case = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
snake_case = logging.get_logger(__name__)
snake_case = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
snake_case = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
snake_case = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class A_ ( UpperCAmelCase ):
    """Model configuration for Whisper.

    Stores the encoder/decoder hyper-parameters of a Whisper checkpoint plus
    the SpecAugment and audio-classification fine-tuning knobs.

    NOTE(review): the original ``__init__`` declared every parameter as ``__A``
    (a SyntaxError); the names below were reconstructed from the attribute
    assignments in the body — confirm against transformers' WhisperConfig.
    """

    SCREAMING_SNAKE_CASE_ : int = '''whisper'''
    SCREAMING_SNAKE_CASE_ : str = ['''past_key_values''']
    SCREAMING_SNAKE_CASE_ : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=5_1865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=5_0257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=5_0256,
        bos_token_id=5_0256,
        eos_token_id=5_0256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 5_0256],  # mutable default kept for interface compatibility
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class A_ ( UpperCAmelCase ):
    """ONNX export configuration for Whisper.

    NOTE(review): the methods keep their original (renamed) identifier
    ``__UpperCAmelCase``; upstream they are the ``inputs`` property,
    ``generate_dummy_inputs`` and ``atol_for_validation``.  The duplicate
    ``__A`` parameter names (a SyntaxError) and the assignments whose targets
    had been lost were reconstructed from the bodies — confirm against
    transformers' WhisperOnnxConfig.
    """

    @property
    def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axes description of the encoder/decoder inputs.
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction='inputs' )
        return common_inputs

    def __UpperCAmelCase ( self ,preprocessor ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework = None ,sampling_rate: int = 2_2050 ,time_duration: float = 5.0 ,frequency: int = 220 ,) -> Mapping[str, Any]:
        """Build dummy audio + decoder inputs for the ONNX exporter."""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self ,preprocessor=preprocessor.feature_extractor ,batch_size=batch_size ,framework=framework ,sampling_rate=sampling_rate ,time_duration=time_duration ,frequency=frequency ,)
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        # With past key values the decoder only sees half the encoder sequence.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features' )
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values' )
        return dummy_inputs

    @property
    def __UpperCAmelCase ( self : Union[str, Any] ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-3
# Lazy-import shim for the ViT-MAE model family: heavy torch/TF submodules are
# only imported when their attributes are first accessed.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)
# NOTE(review): the import map is bound to `snake_case` here, and each backend
# branch below rebinds `snake_case` (overwriting the dict with a list) instead
# of adding keys; the final _LazyModule call reads an undefined
# `_import_structure` — the bindings look clobbered by an automated rename.
# Also, is_flax_available is imported but there is no flax branch.
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: silently skip the PyTorch model exports.
    pass
else:
    snake_case = [
        """VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMAEForPreTraining""",
        """ViTMAELayer""",
        """ViTMAEModel""",
        """ViTMAEPreTrainedModel""",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow missing: silently skip the TF model exports.
    pass
else:
    snake_case = [
        """TFViTMAEForPreTraining""",
        """TFViTMAEModel""",
        """TFViTMAEPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class A_ :
    """A batched, differentiable pinhole camera rig.

    Holds per-camera origin and axes (x, y, z), image resolution
    (width, height), fields of view, and a batch `shape`; provides helpers to
    enumerate pixel coordinates and cast the corresponding rays.

    NOTE(review): the nine fields below all share one name
    (SCREAMING_SNAKE_CASE_) while the methods read
    origin/x/y/z/width/height/x_fov/y_fov/shape, and several `_lowercase = ...`
    bindings are read back under other names — the identifiers look clobbered
    by an automated rename; restore before use.
    """
    SCREAMING_SNAKE_CASE_ : torch.Tensor # [batch_size x 3]
    SCREAMING_SNAKE_CASE_ : torch.Tensor # [batch_size x 3]
    SCREAMING_SNAKE_CASE_ : torch.Tensor # [batch_size x 3]
    SCREAMING_SNAKE_CASE_ : torch.Tensor # [batch_size x 3]
    SCREAMING_SNAKE_CASE_ : int
    SCREAMING_SNAKE_CASE_ : int
    SCREAMING_SNAKE_CASE_ : float
    SCREAMING_SNAKE_CASE_ : float
    SCREAMING_SNAKE_CASE_ : Tuple[int]
    def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        # Sanity checks: origin/x/y/z agree on batch size and are [batch, 3].
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def __UpperCAmelCase ( self : Tuple ) -> Tuple:
        # [width, height] as a float32 tensor.
        return torch.from_numpy(np.array([self.width, self.height] ,dtype=np.floataa ) )
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        # [x_fov, y_fov] as a float32 tensor.
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] ,dtype=np.floataa ) )
    def __UpperCAmelCase ( self : Tuple ) -> torch.Tensor:
        # Integer (x, y) coordinates for every pixel, row-major over height*width.
        _lowercase = torch.arange(self.height * self.width )
        _lowercase = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(__A ,self.width ,rounding_mode='trunc' ),
            ] ,axis=1 ,)
        return coords
    @property
    def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
        # Rays for every pixel of every camera in the batch.
        _lowercase , *_lowercase = self.shape
        _lowercase = int(np.prod(__A ) )
        _lowercase = self.get_image_coords()
        _lowercase = torch.broadcast_to(coords.unsqueeze(0 ) ,[batch_size * inner_batch_size, *coords.shape] )
        _lowercase = self.get_camera_rays(__A )
        _lowercase = rays.view(__A ,inner_batch_size * self.height * self.width ,2 ,3 )
        return rays
    def __UpperCAmelCase ( self : List[Any] ,__A : torch.Tensor ) -> torch.Tensor:
        # Map pixel coordinates to (origin, direction) ray pairs.
        _lowercase , *_lowercase , _lowercase = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        _lowercase = coords.view(__A ,-1 ,2 )
        _lowercase = self.resolution()
        _lowercase = self.fov()
        # Normalized device coordinates in [-1, 1], scaled by tan(fov / 2).
        _lowercase = (flat.float() / (res - 1)) * 2 - 1
        _lowercase = fracs * torch.tan(fov / 2 )
        _lowercase = fracs.view(__A ,-1 ,2 )
        _lowercase = (
            self.z.view(__A ,1 ,3 )
            + self.x.view(__A ,1 ,3 ) * fracs[:, :, :1]
            + self.y.view(__A ,1 ,3 ) * fracs[:, :, 1:]
        )
        _lowercase = directions / directions.norm(dim=-1 ,keepdim=__A )
        _lowercase = torch.stack(
            [
                torch.broadcast_to(self.origin.view(__A ,1 ,3 ) ,[batch_size, directions.shape[1], 3] ),
                directions,
            ] ,dim=2 ,)
        return rays.view(__A ,*__A ,2 ,3 )
    def __UpperCAmelCase ( self : Optional[Any] ,__A : int ,__A : int ) -> "DifferentiableProjectiveCamera":
        # Same rig at a new resolution; the aspect ratio must be preserved.
        # NOTE(review): both parameters are named __A (a SyntaxError as written),
        # and DifferentiableProjectiveCamera is not defined under that name here.
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin ,x=self.x ,y=self.y ,z=self.z ,width=__A ,height=__A ,x_fov=self.x_fov ,y_fov=self.y_fov ,)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> DifferentiableProjectiveCamera:
    """Create a rig of 20 cameras panning in a circle and looking at the origin,
    each rendering a square image of side *snake_case__* pixels.

    NOTE(review): the original body appended the integer size into every
    per-camera list and passed it to np.cross; the per-iteration vectors were
    reconstructed from the upstream shap-e helper — confirm against it.
    """
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        # Viewing direction: around the circle, tilted slightly downward.
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        # Camera sits 4 units away, looking back at the origin.
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=snake_case__ , height=snake_case__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
# Import surface for the UniDiffuser pipeline: when torch/transformers are
# missing, fall back to dummy placeholder objects so importing never hard-fails.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholders that raise an informative error when actually used.
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    # Real implementations, only importable with torch + transformers present.
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A_ :
    """Pytree state threaded through the Karras-VE sampling loop.

    NOTE(review): the three fields below all share one name
    (SCREAMING_SNAKE_CASE_), so only the last annotation survives;
    `set_timesteps` reads num_inference_steps / schedule / timesteps, which
    look like the intended field names.
    """
    SCREAMING_SNAKE_CASE_ : Optional[int] = None
    SCREAMING_SNAKE_CASE_ : Optional[jnp.ndarray] = None
    SCREAMING_SNAKE_CASE_ : Optional[jnp.ndarray] = None # sigma(t_i)
    @classmethod
    def __UpperCAmelCase ( cls : Optional[int] ) -> Optional[int]:
        # Fresh, empty state (all fields at their defaults).
        return cls()
@dataclass
class A_ ( UpperCAmelCase ):
    """Step output of the Karras-VE scheduler.

    NOTE(review): the three fields share one name (SCREAMING_SNAKE_CASE_); the
    step methods construct this as FlaxKarrasVeOutput(prev_sample=...,
    derivative=..., state=...), which look like the intended names.
    """
    SCREAMING_SNAKE_CASE_ : jnp.ndarray
    SCREAMING_SNAKE_CASE_ : jnp.ndarray
    SCREAMING_SNAKE_CASE_ : KarrasVeSchedulerState
class A_ ( UpperCAmelCase , UpperCAmelCase ):
    """Flax Karras-VE stochastic sampler.

    Variance-exploding scheduler with churn-based noise injection and a
    second-order correction step, operating on an explicit state pytree.

    NOTE(review): several method signatures declare every argument as ``__A``
    (a SyntaxError as written) and many ``_lowercase = ...`` bindings are read
    back under other names — the identifiers look clobbered by an automated
    rename; restore against diffusers' FlaxKarrasVeScheduler before use.
    """
    @property
    def __UpperCAmelCase ( self : str ) -> Dict:
        # This scheduler carries explicit state.
        return True
    @register_to_config
    def __init__( self : List[Any] ,__A : float = 0.02 ,__A : float = 100 ,__A : float = 1.007 ,__A : float = 80 ,__A : float = 0.05 ,__A : float = 50 ,) -> Optional[Any]:
        # All hyper-parameters are captured by @register_to_config.
        pass
    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # Build an initial (empty) scheduler state.
        return KarrasVeSchedulerState.create()
    def __UpperCAmelCase ( self : List[str] ,__A : KarrasVeSchedulerState ,__A : int ,__A : Tuple = () ) -> KarrasVeSchedulerState:
        # set_timesteps: descending timestep indices plus the Karras sigma schedule
        # interpolating geometrically from sigma_max down to sigma_min.
        _lowercase = jnp.arange(0 ,__A )[::-1].copy()
        _lowercase = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=__A ,schedule=jnp.array(__A ,dtype=jnp.floataa ) ,timesteps=__A ,)
    def __UpperCAmelCase ( self : List[Any] ,__A : KarrasVeSchedulerState ,__A : jnp.ndarray ,__A : float ,__A : random.KeyArray ,) -> Tuple[jnp.ndarray, float]:
        # add_noise_to_input: raise sigma by a churn factor (when sigma lies in
        # [s_min, s_max]) and add matching gaussian noise to the sample.
        if self.config.s_min <= sigma <= self.config.s_max:
            _lowercase = min(self.config.s_churn / state.num_inference_steps ,2**0.5 - 1 )
        else:
            _lowercase = 0
        # sample eps ~ N(0, S_noise^2 * I)
        _lowercase = random.split(__A ,num=1 )
        _lowercase = self.config.s_noise * random.normal(key=__A ,shape=sample.shape )
        _lowercase = sigma + gamma * sigma
        _lowercase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def __UpperCAmelCase ( self : Union[str, Any] ,__A : KarrasVeSchedulerState ,__A : jnp.ndarray ,__A : float ,__A : float ,__A : jnp.ndarray ,__A : bool = True ,) -> Union[FlaxKarrasVeOutput, Tuple]:
        # step: one Euler step from sigma_hat down to sigma_prev.
        _lowercase = sample_hat + sigma_hat * model_output
        _lowercase = (sample_hat - pred_original_sample) / sigma_hat
        _lowercase = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=__A ,derivative=__A ,state=__A )
    def __UpperCAmelCase ( self : List[Any] ,__A : KarrasVeSchedulerState ,__A : jnp.ndarray ,__A : float ,__A : float ,__A : jnp.ndarray ,__A : jnp.ndarray ,__A : jnp.ndarray ,__A : bool = True ,) -> Union[FlaxKarrasVeOutput, Tuple]:
        # step_correct: second-order (Heun) correction averaging the two slopes.
        _lowercase = sample_prev + sigma_prev * model_output
        _lowercase = (sample_prev - pred_original_sample) / sigma_prev
        _lowercase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=__A ,derivative=__A ,state=__A )
    def __UpperCAmelCase ( self : int ,__A : KarrasVeSchedulerState ,__A : Optional[Any] ,__A : Optional[int] ,__A : Tuple ) -> Optional[int]:
        # add_noise is not supported by this scheduler.
        raise NotImplementedError()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
    """Model configuration for Transformer-XL (transfo-xl).

    NOTE(review): the original ``__init__`` declared every parameter as ``__A``
    (a SyntaxError) and the ``max_position_embeddings`` property had been
    renamed so that its own ``.setter`` decorator referenced an undefined name;
    both were reconstructed from the body — confirm against transformers'
    TransfoXLConfig.
    """

    SCREAMING_SNAKE_CASE_ : int = '''transfo-xl'''
    SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''mems''']
    SCREAMING_SNAKE_CASE_ : Optional[Any] = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],  # mutable default kept for interface compatibility
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        # The first projection is never tied; the rest follow the flag.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id ,**kwargs )

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 8 ) -> str:
    """Return a random password of length *snake_case__* drawn from letters,
    digits and punctuation.

    Fixes the original body, which built the character pool but discarded it
    and then called ``secrets.choice`` on the integer length (TypeError).
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( chars_incl: str , i: int ) -> str:
    """Build a password of total length *i* guaranteed to contain *chars_incl*,
    padded with roughly equal parts letters, digits and punctuation, shuffled.

    Fixes the original signature, which declared both parameters with the same
    name (a SyntaxError); the pool arguments passed to ``random`` were
    reconstructed from the upstream algorithm — confirm before relying on the
    exact letters/digits/punctuation split.
    """
    # Remaining length once the required characters are accounted for.
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def SCREAMING_SNAKE_CASE__ ( ctbi: str , i: int ) -> str:
    """Return *i* characters drawn uniformly (with replacement) from *ctbi*.

    Fixes the original signature, which declared both parameters with the same
    name — a SyntaxError.  Generalised helper for letters, digits and symbols.
    """
    return "".join(secrets.choice(ctbi ) for _ in range(i ) )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Dict , snake_case__ :str ) -> Optional[Any]:
pass # Put your code here...
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any , snake_case__ :Dict ) -> Any:
pass # Put your code here...
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple , snake_case__ :int ) -> Dict:
pass # Put your code here...
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :int = 8 ) -> bool:
if len(snake_case__ ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowercase = any(char in ascii_uppercase for char in password )
_lowercase = any(char in ascii_lowercase for char in password )
_lowercase = any(char in digits for char in password )
_lowercase = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
_lowercase = int(input('Please indicate the max length of your password: ' ).strip() )
_lowercase = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(snake_case__ ) )
print(
'Alternative Password generated:' , alternative_password_generator(snake_case__ , snake_case__ ) , )
print('[If you are thinking of using this passsword, You better save it.]' )
if __name__ == "__main__":
main() | 67 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''dpr'''
def __init__( self : int ,__A : Union[str, Any]=3_0522 ,__A : Optional[int]=768 ,__A : int=12 ,__A : List[Any]=12 ,__A : Optional[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : List[Any]=0.1 ,__A : str=512 ,__A : List[str]=2 ,__A : Tuple=0.02 ,__A : Tuple=1e-12 ,__A : List[Any]=0 ,__A : List[str]="absolute" ,__A : int = 0 ,**__A : int ,) -> Tuple:
super().__init__(pad_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = projection_dim
_lowercase = position_embedding_type | 67 | 1 |
from math import isqrt
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
return all(number % divisor != 0 for divisor in range(2 , isqrt(snake_case__ ) + 1 ) )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 10**6 ) -> int:
_lowercase = 0
_lowercase = 1
_lowercase = 7
while prime_candidate < max_prime:
primes_count += is_prime(snake_case__ )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""") | 67 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str] ) -> Optional[Any]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowercase = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowercase = min(snake_case__ , snake_case__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowercase = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowercase = max(snake_case__ , snake_case__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Dict:
_lowercase = []
_lowercase = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
for i in range(1 , len(snake_case__ ) - 1 ):
_lowercase = Pipe()
_lowercase = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
_lowercase = temp_rs
_lowercase = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__ ) - 1,
arr[len(snake_case__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__ ) ):
_lowercase = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
_lowercase = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*snake_case__ )
_lowercase = odd_even_transposition(snake_case__ )
print('Sorted List\n' )
print(*snake_case__ )
if __name__ == "__main__":
main() | 67 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = '''align_text_model'''
def __init__( self : str ,__A : str=3_0522 ,__A : List[str]=768 ,__A : Any=12 ,__A : int=12 ,__A : List[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : List[Any]=0.1 ,__A : Any=0.1 ,__A : Optional[int]=512 ,__A : str=2 ,__A : List[Any]=0.02 ,__A : Dict=1e-12 ,__A : Optional[Any]=0 ,__A : List[Any]="absolute" ,__A : List[Any]=True ,**__A : List[str] ,) -> Union[str, Any]:
super().__init__(**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = position_embedding_type
_lowercase = use_cache
_lowercase = pad_token_id
@classmethod
def __UpperCAmelCase ( cls : Tuple ,__A : Union[str, os.PathLike] ,**__A : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowercase , _lowercase = cls.get_config_dict(__A ,**__A )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
_lowercase = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__A ,**__A )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = '''align_vision_model'''
def __init__( self : int ,__A : int = 3 ,__A : int = 600 ,__A : float = 2.0 ,__A : float = 3.1 ,__A : int = 8 ,__A : List[int] = [3, 3, 5, 3, 5, 5, 3] ,__A : List[int] = [32, 16, 24, 40, 80, 112, 192] ,__A : List[int] = [16, 24, 40, 80, 112, 192, 320] ,__A : List[int] = [] ,__A : List[int] = [1, 2, 2, 2, 1, 2, 1] ,__A : List[int] = [1, 2, 2, 3, 3, 4, 1] ,__A : List[int] = [1, 6, 6, 6, 6, 6, 6] ,__A : float = 0.25 ,__A : str = "swish" ,__A : int = 2560 ,__A : str = "mean" ,__A : float = 0.02 ,__A : float = 0.001 ,__A : float = 0.99 ,__A : float = 0.2 ,**__A : Tuple ,) -> Tuple:
super().__init__(**__A )
_lowercase = num_channels
_lowercase = image_size
_lowercase = width_coefficient
_lowercase = depth_coefficient
_lowercase = depth_divisor
_lowercase = kernel_sizes
_lowercase = in_channels
_lowercase = out_channels
_lowercase = depthwise_padding
_lowercase = strides
_lowercase = num_block_repeats
_lowercase = expand_ratios
_lowercase = squeeze_expansion_ratio
_lowercase = hidden_act
_lowercase = hidden_dim
_lowercase = pooling_type
_lowercase = initializer_range
_lowercase = batch_norm_eps
_lowercase = batch_norm_momentum
_lowercase = drop_connect_rate
_lowercase = sum(__A ) * 4
@classmethod
def __UpperCAmelCase ( cls : int ,__A : Union[str, os.PathLike] ,**__A : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowercase , _lowercase = cls.get_config_dict(__A ,**__A )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
_lowercase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__A ,**__A )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = '''align'''
SCREAMING_SNAKE_CASE_ : Dict = True
def __init__( self : int ,__A : Optional[int]=None ,__A : Dict=None ,__A : Optional[int]=640 ,__A : Union[str, Any]=1.0 ,__A : Dict=0.02 ,**__A : Dict ,) -> List[str]:
super().__init__(**__A )
if text_config is None:
_lowercase = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
_lowercase = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
_lowercase = AlignTextConfig(**__A )
_lowercase = AlignVisionConfig(**__A )
_lowercase = projection_dim
_lowercase = temperature_init_value
_lowercase = initializer_range
@classmethod
def __UpperCAmelCase ( cls : Dict ,__A : AlignTextConfig ,__A : AlignVisionConfig ,**__A : Optional[int] ) -> int:
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**__A )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = copy.deepcopy(self.__dict__ )
_lowercase = self.text_config.to_dict()
_lowercase = self.vision_config.to_dict()
_lowercase = self.__class__.model_type
return output | 67 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''
def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = initializer_range
_lowercase = type_vocab_size
_lowercase = layer_norm_eps
_lowercase = use_cache
_lowercase = rescale_embeddings
_lowercase = attention_type
_lowercase = use_bias
_lowercase = block_size
_lowercase = num_random_blocks
_lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 67 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list , snake_case__ :int ) -> Dict:
# Checks if the entire collection has been sorted
if len(snake_case__ ) <= 1 or n <= 1:
return
insert_next(snake_case__ , n - 1 )
rec_insertion_sort(snake_case__ , n - 1 )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list , snake_case__ :int ) -> List[Any]:
# Checks order between adjacent elements
if index >= len(snake_case__ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
_lowercase , _lowercase = (
collection[index],
collection[index - 1],
)
insert_next(snake_case__ , index + 1 )
if __name__ == "__main__":
snake_case = input("""Enter integers separated by spaces: """)
snake_case = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list) | 67 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> list:
_lowercase = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
_lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase = j
return prefix_result
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 1 |
snake_case = """Alexander Joslin"""
import operator as op
from .stack import Stack
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
_lowercase = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
_lowercase = Stack()
_lowercase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(snake_case__ ) )
elif i in operators:
# RULE 2
operator_stack.push(snake_case__ )
elif i == ")":
# RULE 4
_lowercase = operator_stack.peek()
operator_stack.pop()
_lowercase = operand_stack.peek()
operand_stack.pop()
_lowercase = operand_stack.peek()
operand_stack.pop()
_lowercase = operators[opr](snake_case__ , snake_case__ )
operand_stack.push(snake_case__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
snake_case = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""") | 67 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Union[str, Any]:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase = s - 2 * j
break
return diff | 67 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 |
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Tuple:
    """Build the ``UperNetConfig`` (with a Swin backbone) for a given checkpoint name.

    Args:
        snake_case__: model name, e.g. ``"upernet-swin-tiny"``; the substrings
            ``tiny``/``small``/``base``/``large`` select the Swin variant.

    Returns:
        A fully populated ``UperNetConfig`` with ADE20k label mappings.

    Raises:
        ValueError: if the model name matches none of the known Swin sizes.
    """
    model_name = snake_case__
    # Defaults for tiny/small; base/large override these below.
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    else:
        # Previously an unknown name fell through and crashed later with a
        # NameError; fail fast with a clear message instead.
        raise ValueError(f"""Unknown Swin size in model name: {model_name}""" )

    # set label information (ADE20k semantic segmentation: 150 classes)
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    # Use a context manager so the file handle is closed deterministically.
    with open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) as f:
        idalabel = json.load(f )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid , )
    return config
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] ) -> List[Any]:
    """Build the list of ``(old_key, new_key)`` pairs that map the original
    mmsegmentation checkpoint names onto the HuggingFace model names.

    Args:
        snake_case__: the ``UperNetConfig`` whose ``backbone_config.depths``
            determines how many stage/block entries are generated.

    Returns:
        List of 2-tuples ``(original_name, huggingface_name)``.
    """
    config = snake_case__
    # Bug fix: the list was assigned to a throwaway name while ``append`` was
    # called on an undefined ``rename_keys`` — bind it properly.
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
    rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
    rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
    rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
        # Only the first three stages have a patch-merging downsample layer.
        if i < 3:
            rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
            rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
        rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
        rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
    # decode head
    rename_keys.extend(
        [
            ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
            ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
            ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
            ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
        ] )
    # fmt: on
    return rename_keys
def SCREAMING_SNAKE_CASE__ ( dct :dict , old :str , new :str ) -> None:
    """Rename key ``old`` to ``new`` in ``dct`` in place, keeping its value.

    Bug fix: the original declared the same parameter name three times
    (a SyntaxError) and never stored the popped value under the new key.
    """
    val = dct.pop(old )
    dct[new] = val
def SCREAMING_SNAKE_CASE__ ( state_dict :dict , backbone_config ) -> None:
    """Split every fused Swin ``qkv`` projection into separate query/key/value
    entries of ``state_dict``, in place.

    Bug fix: the original declared duplicate parameter names (a SyntaxError)
    and assigned every slice to a throwaway local instead of writing the new
    state-dict entries, so the q/k/v weights were silently dropped.

    Args:
        state_dict: checkpoint mapping; fused ``...attn.w_msa.qkv`` entries
            are popped and replaced by per-projection entries.
        backbone_config: provides ``embed_dim`` and ``depths`` to derive the
            per-stage hidden dimension.
    """
    # Hidden size doubles at each Swin stage.
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
            in_proj_bias = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
def SCREAMING_SNAKE_CASE__ ( snake_case__ ) -> Optional[Any]:
    """Reorder a 2D patch-merging weight: regroup the 4 interleaved sub-blocks
    of the input channels via the (0, 2, 1, 3) permutation.

    Bug fix: the body read ``x`` while the parameter was named
    ``snake_case__``, raising NameError; bind the argument explicitly.
    """
    x = snake_case__
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def SCREAMING_SNAKE_CASE__(snake_case__):
    """Inverse of the 2D unfold-reduction reordering (groups of 4 along input dim).

    Views the input dim as (in_channel // 4, 4), permutes the size-4 axis by
    [0, 2, 1, 3] and re-flattens; shape (out_channel, in_channel) is preserved.
    The original body referenced undefined names; restored.
    """
    out_channel, in_channel = snake_case__.shape
    x = snake_case__.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def SCREAMING_SNAKE_CASE__(snake_case__):
    """Reorder a 1D ``downsample.norm`` parameter: view as (4, n/4), permute the
    four groups by [0, 2, 1, 3], transpose and re-flatten to length n.

    The original body referenced undefined names (``x``, ``in_channel``); restored.
    """
    in_channel = snake_case__.shape[0]
    x = snake_case__.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def SCREAMING_SNAKE_CASE__(snake_case__):
    """Inverse of the 1D unfold-norm reordering: view as (n/4, 4), permute the
    size-4 axis by [0, 2, 1, 3], transpose and re-flatten to length n.

    The original body referenced undefined names; restored.
    """
    in_channel = snake_case__.shape[0]
    x = snake_case__.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def SCREAMING_SNAKE_CASE__(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet+Swin checkpoint to the HF format.

    Downloads the checkpoint, remaps its keys, verifies the logits on a fixture
    image, then optionally saves to ``pytorch_dump_folder_path`` and/or pushes
    to the hub. The original bound every intermediate to the same throwaway
    local (``_lowercase``) and had three identically-named parameters; restored.
    """
    model_name_to_url = {
        'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
        'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
        'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
        'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu', file_name=model_name)[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters: undo the "unfold" channel ordering used upstream
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print('First values of logits:', logits[0, 0, :3, :3])

    # assert values: top-left 3x3 logits for each released checkpoint
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(f"""openmmlab/{model_name}""")
        processor.push_to_hub(f"""openmmlab/{model_name}""")
if __name__ == "__main__":
    # Script entry point. The original bound the parser and the parsed args to
    # `snake_case` and then referenced `parser`/`args` (NameError); restored,
    # and dataset junk trailing the final line removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""upernet-swin-tiny""",
        type=str,
        choices=[f"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
        help="""Name of the Swin + UperNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
    """Helper that builds configs/inputs for TimmBackbone model tests.

    NOTE(review): this class appears machine-mangled — every ``__init__``
    parameter is named ``__A`` (duplicate parameter names), the constructor
    binds every value to the throwaway local ``_lowercase`` instead of
    ``self.*`` attributes, and all four methods share the name
    ``__UpperCAmelCase`` (each def shadows the previous one). Restore against
    the upstream ``TimmBackboneModelTester`` before relying on it.
    """

    def __init__( self : Dict ,__A : Any ,__A : Tuple=None ,__A : Optional[int]=None ,__A : Optional[int]=None ,__A : int="resnet50" ,__A : int=3 ,__A : List[Any]=32 ,__A : Tuple=3 ,__A : List[Any]=True ,__A : Tuple=True ,) -> Any:
        # NOTE(review): values are discarded; upstream stores parent, out_indices,
        # stage_names, out_features, backbone, batch_size, image_size,
        # num_channels, use_pretrained_backbone and is_training on self.
        _lowercase = parent
        _lowercase = out_indices if out_indices is not None else [4]
        _lowercase = stage_names
        _lowercase = out_features
        _lowercase = backbone
        _lowercase = batch_size
        _lowercase = image_size
        _lowercase = num_channels
        _lowercase = use_pretrained_backbone
        _lowercase = is_training

    def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
        # Build (config, pixel_values) for a single forward pass.
        _lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowercase = self.get_config()
        return config, pixel_values

    def __UpperCAmelCase ( self : Tuple ) -> Tuple:
        # Assemble a TimmBackboneConfig from the tester's settings.
        return TimmBackboneConfig(
            image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)

    def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Dict ) -> Union[str, Any]:
        # Instantiate the backbone, run it in eval mode and check the last
        # feature map's shape.
        _lowercase = TimmBackbone(config=__A )
        model.to(__A )
        model.eval()
        with torch.no_grad():
            _lowercase = model(__A )
        self.parent.assertEqual(
            result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)

    def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        # Convert (config, pixel_values) into the common (config, inputs_dict) form.
        _lowercase = self.prepare_config_and_inputs()
        _lowercase , _lowercase = config_and_inputs
        _lowercase = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __UpperCAmelCase ( self : Any ) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : int ) -> Any:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __UpperCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self : Dict ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = True
_lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowercase = self.all_model_classes[0]
_lowercase = model_class(__A )
model.to(__A )
_lowercase = self._prepare_for_class(__A ,__A )
_lowercase = model(**__A )
_lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
_lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowercase = copy.deepcopy(__A )
_lowercase = None
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
_lowercase = copy.deepcopy(__A )
_lowercase = False
_lowercase = model_class(__A )
model.to(__A )
model.eval()
_lowercase = model(**__A ) | 67 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class A_(Generic[T]):
    """A directed or undirected graph stored as an adjacency list (dict of lists).

    The original bound the TypeVar to ``snake_case`` (leaving ``T`` undefined),
    never stored ``adj_list``/``directed`` on ``self``, used duplicate ``__A``
    parameter names, and dropped every dictionary write; all restored here.
    """

    def __init__(self, directed: bool = True) -> None:
        # vertex -> list of adjacent vertices
        self.adj_list: dict[T, list[T]] = {}
        self.directed = directed

    def __UpperCAmelCase(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Add an edge; unknown vertices are created on the fly. Returns self."""
        if not self.directed:  # For undirected graphs
            # If both endpoints already exist, append each to the other's list.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # Only the source exists: append, then create the destination with
            # the source as its first neighbour.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # Only the destination exists: append, then create the source with
            # the destination as its first neighbour.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create both, each pointing at the other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # Both endpoints exist: just extend the source's list.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # Only the source exists: append, and create the destination with
            # no outgoing edges.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # Only the destination exists: create the source pointing at it.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create the source pointing at the destination and
            # the destination with no outgoing edges.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the nllb_moe subpackage. The original bound this
# dict (and the modeling symbol list, and the _LazyModule instance) to
# `snake_case`, so `_import_structure` was undefined and the lazy module was
# never installed in sys.modules; it also imported the misspelled
# `NllbMoeTopaRouter` instead of `NllbMoeTop2Router`.
_import_structure = {
    """configuration_nllb_moe""": [
        """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """NllbMoeConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
        """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NllbMoeForConditionalGeneration""",
        """NllbMoeModel""",
        """NllbMoePreTrainedModel""",
        """NllbMoeTop2Router""",
        """NllbMoeSparseMLP""",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports symbols on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
snake_case = """Hello world! cécé herlolip"""
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :str , snake_case__ :bool ) -> Any:
    """Convert a fairseq XLM-RoBERTa-XL checkpoint to a HF model and save it.

    NOTE(review): this function appears machine-mangled — the three parameters
    share one name (a SyntaxError) and every assignment target was rewritten to
    the throwaway local ``_lowercase``, while the bodies still reference the
    original names (``roberta``, ``config``, ``model``, ``layer`` …). The
    right-hand sides below are kept byte-identical; restore the assignment
    targets from the upstream conversion script before running this.
    """
    _lowercase = FairseqRobertaModel.from_pretrained(snake_case__ )
    roberta.eval() # disable dropout
    _lowercase = roberta.model.encoder.sentence_encoder
    # Mirror the fairseq encoder hyper-parameters in a HF XLMRobertaConfig.
    _lowercase = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        _lowercase = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our RoBERTa config:' , snake_case__ )
    _lowercase = XLMRobertaXLForSequenceClassification(snake_case__ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case__ )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    _lowercase = roberta_sent_encoder.embed_tokens.weight
    _lowercase = roberta_sent_encoder.embed_positions.weight
    _lowercase = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
    _lowercase = roberta_sent_encoder.layer_norm.weight
    _lowercase = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        _lowercase = model.roberta.encoder.layer[i]
        _lowercase = roberta_sent_encoder.layers[i]
        _lowercase = layer.attention
        _lowercase = roberta_layer.self_attn_layer_norm.weight
        _lowercase = roberta_layer.self_attn_layer_norm.bias
        # self attention
        _lowercase = layer.attention.self
        # q/k/v projections must all be square (hidden_size x hidden_size).
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        _lowercase = roberta_layer.self_attn.q_proj.weight
        _lowercase = roberta_layer.self_attn.q_proj.bias
        _lowercase = roberta_layer.self_attn.k_proj.weight
        _lowercase = roberta_layer.self_attn.k_proj.bias
        _lowercase = roberta_layer.self_attn.v_proj.weight
        _lowercase = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        _lowercase = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        _lowercase = roberta_layer.self_attn.out_proj.weight
        _lowercase = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        _lowercase = roberta_layer.final_layer_norm.weight
        _lowercase = roberta_layer.final_layer_norm.bias
        # intermediate
        _lowercase = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        _lowercase = roberta_layer.fca.weight
        _lowercase = roberta_layer.fca.bias
        # output
        _lowercase = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        _lowercase = roberta_layer.fca.weight
        _lowercase = roberta_layer.fca.bias
        # end of layer
    if classification_head:
        _lowercase = roberta.model.classification_heads['mnli'].dense.weight
        _lowercase = roberta.model.classification_heads['mnli'].dense.bias
        _lowercase = roberta.model.classification_heads['mnli'].out_proj.weight
        _lowercase = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        _lowercase = roberta.model.encoder.lm_head.dense.weight
        _lowercase = roberta.model.encoder.lm_head.dense.bias
        _lowercase = roberta.model.encoder.lm_head.layer_norm.weight
        _lowercase = roberta.model.encoder.lm_head.layer_norm.bias
        _lowercase = roberta.model.encoder.lm_head.weight
        _lowercase = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    _lowercase = roberta.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
    _lowercase = model(snake_case__ )[0]
    if classification_head:
        _lowercase = roberta.model.classification_heads['mnli'](roberta.extract_features(snake_case__ ) )
    else:
        _lowercase = roberta.model(snake_case__ )[0]
    print(our_output.shape , their_output.shape )
    _lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    _lowercase = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    pathlib.Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(snake_case__ )
if __name__ == "__main__":
    # Script entry point. The original bound the parser and the parsed args to
    # `snake_case` and then referenced `parser`/`args` (NameError); restored,
    # and dataset junk trailing the final line removed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
# Fifth power of each decimal digit, keyed by the digit's string form.
# The original bound this to `snake_case`, leaving DIGITS_FIFTH_POWER (used by
# the digit-sum function below) undefined.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def SCREAMING_SNAKE_CASE__(snake_case__: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``snake_case__``.

    Computed directly per digit instead of via the module-level lookup table,
    which the mangled original bound to the wrong name (NameError at call time).
    """
    return sum(int(digit) ** 5 for digit in str(snake_case__))
def SCREAMING_SNAKE_CASE__(snake_case__: int = 100_0000) -> int:
    """Project Euler 30: sum of all numbers in [1000, ``snake_case__``) that equal
    the sum of the fifth powers of their own digits.

    Generalized with a backward-compatible upper-bound parameter (default keeps
    the original 1_000_000 range). Uses a local digit-power helper because the
    mangled original called a sibling bound to the wrong name.
    """

    def _digit_fifth_power_sum(number: int) -> int:
        # Sum of d**5 over the decimal digits of `number`.
        return sum(int(digit) ** 5 for digit in str(number))

    return sum(
        number
        for number in range(1000, snake_case__)
        if number == _digit_fifth_power_sum(number)
    )
if __name__ == "__main__":
    # Print the Project Euler 30 answer when run as a script.
    # (Removed dataset junk that trailed the original print line.)
    print(solution())
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
snake_case = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A_(BaseImageProcessor):
    r"""
    CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale and normalization, returning a ``BatchFeature`` with
    ``pixel_values``.

    The mangled original gave all five methods the same name (so only the last
    survived), used duplicate ``__A`` parameter names (a SyntaxError) and never
    stored the constructor settings on ``self``; method names, parameter names
    and attribute writes are restored here.
    """

    # Mangled upstream name for `model_input_names`.
    SCREAMING_SNAKE_CASE_ = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        # Calls the module-level `resize` from image_transforms (not this method).
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the configured pipeline over one image or a list of images.

        Every flag falls back to the value stored at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__(snake_case__: int) -> bool:
    """Primality test via 6k±1 trial division.

    The mangled original referenced ``number`` while the parameter was named
    ``snake_case__`` (NameError); restored with a local alias.
    """
    number = snake_case__
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining prime candidates have the form 6k +/- 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def SCREAMING_SNAKE_CASE__(snake_case__: int) -> list[int]:
    """Return ``snake_case__`` plus every left- and right-truncation of it.

    E.g. 3797 -> [3797, 797, 379, 97, 37, 7, 3]. The mangled original bound
    the string form and the accumulator to a dead local (NameError); restored.
    """
    str_num = str(snake_case__)
    list_nums = [snake_case__]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # drop i leading digits
        list_nums.append(int(str_num[:-i]))  # drop i trailing digits
    return list_nums
def SCREAMING_SNAKE_CASE__(snake_case__: int) -> bool:
    """Cheap pre-filter for truncatable-prime candidates.

    For numbers with more than three digits, both the leading and the trailing
    three-digit chunks must themselves be prime; shorter numbers always pass.
    """
    digits = str(snake_case__)
    if len(digits) <= 3:
        return True
    return is_prime(int(digits[:3])) and is_prime(int(digits[-3:]))
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 11 ) -> list[int]:
    """Return the first ``snake_case__`` truncatable primes (Project Euler 37).

    A truncatable prime stays prime when digits are repeatedly removed from
    either end. The search starts at 13 and only scans odd numbers, since
    2, 3, 5 and 7 are excluded by the problem statement.

    Bugs fixed: the original clobbered its one local, compared
    ``len(snake_case__)`` (an int) against an undefined ``count``, used
    undefined ``num``/``list_truncated_primes``/``list_nums``, and tested
    ``is_prime(snake_case__)`` instead of each truncation ``i``.
    """
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != snake_case__:
        if validate(num ):
            _lowercase = list_truncated_nums(num )
            # Every left/right truncation must itself be prime.
            if all(is_prime(i ) for i in _lowercase ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def SCREAMING_SNAKE_CASE__ ( ) -> int:
    """Return the sum of the eleven truncatable primes (Project Euler 37 answer)."""
    truncated_primes = compute_truncated_primes(11 )
    return sum(truncated_primes )
if __name__ == "__main__":
    # Print the Project Euler 37 answer; the `=` format spec echoes the expression text.
    print(F"""{sum(compute_truncated_primes(1_1)) = }""")
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
    """Test helper that builds a tiny UMT5 config plus dummy encoder/decoder inputs.

    NOTE(review): this file appears machine-renamed — every ``__init__``
    parameter is literally named ``__A`` (a duplicate-argument syntax error in
    real Python) while the body reads names such as ``parent``/``batch_size``,
    and the attribute assignments all target the local ``_lowercase`` instead
    of ``self.<attr>``. The original upstream names must be restored before
    this tester can run; comments below describe the evident intent.
    """
    # Intended: store the ctor arguments as attributes (parent test case,
    # batch/sequence sizes, model hyper-parameters, special token ids).
    def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = encoder_seq_length
        _lowercase = decoder_seq_length
        # For common tests
        _lowercase = self.decoder_seq_length
        _lowercase = is_training
        _lowercase = use_attention_mask
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = d_ff
        _lowercase = relative_attention_num_buckets
        _lowercase = dropout_rate
        _lowercase = initializer_factor
        _lowercase = eos_token_id
        _lowercase = pad_token_id
        _lowercase = decoder_start_token_id
        _lowercase = None
        _lowercase = decoder_layers
    # Full pretrained config, for tests that need realistic defaults.
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        return TaConfig.from_pretrained('google/umt5-base' )
    # Builds the kwargs dict for a forward pass, synthesising any masks not given.
    def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
        if attention_mask is None:
            _lowercase = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            _lowercase = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            _lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
        if decoder_head_mask is None:
            _lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        if cross_attn_head_mask is None:
            _lowercase = torch.ones(
                config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    # Random token ids for encoder and decoder, clamped away from the pad id.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        _lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
        _lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        _lowercase = input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = self.get_config()
        _lowercase = config.num_attention_heads
        _lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
        return config, input_dict
    # Convenience wrapper used by the common test mixins.
    def __UpperCAmelCase ( self : Dict ) -> str:
        _lowercase , _lowercase = self.prepare_config_and_inputs()
        return config, inputs_dict
    # Tiny config with a reduced vocab (166) for pipeline-style tests.
    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        return TaConfig(
            vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    # Tiny config built from this tester's own hyper-parameters.
    def __UpperCAmelCase ( self : Dict ) -> Any:
        return TaConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    # Forward-pass smoke test: checks output shapes and past-key-value layout.
    def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
        _lowercase = UMTaModel(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
        _lowercase = model(input_ids=__A ,decoder_input_ids=__A )
        _lowercase = result.last_hidden_state
        _lowercase = result.past_key_values
        _lowercase = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(__A ) ,config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) ,4 )
    # Verifies decoding with cached past_key_values matches uncached decoding.
    # NOTE(review): several assertions compare `len(__A)` with itself — the two
    # distinct model outputs were evidently renamed to the same placeholder.
    def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
        _lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
        # first forward pass
        _lowercase = model(__A ,use_cache=__A )
        _lowercase = model(__A )
        _lowercase = model(__A ,use_cache=__A )
        self.parent.assertTrue(len(__A ) == len(__A ) )
        self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
        _lowercase , _lowercase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
        # append to next input_ids and
        _lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
        _lowercase = model(__A )['last_hidden_state']
        _lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
        # select random slice
        _lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        _lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
        _lowercase = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
    # fp16 forward pass must not produce NaNs.
    def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
        _lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
        _lowercase = model(**__A )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """UMT5 model test suite wired into the common model/generation/pipeline mixins.

    NOTE(review): the three base classes were all renamed to ``UpperCAmelCase``;
    they are presumably ModelTesterMixin, GenerationTesterMixin and
    PipelineTesterMixin (imported above) — confirm against upstream.
    """
    # Model classes exercised by the common tests.
    SCREAMING_SNAKE_CASE_ : List[str] = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    SCREAMING_SNAKE_CASE_ : str = (
        {
            '''conversational''': UMTaForConditionalGeneration,
            '''feature-extraction''': UMTaModel,
            '''summarization''': UMTaForConditionalGeneration,
            '''text2text-generation''': UMTaForConditionalGeneration,
            '''translation''': UMTaForConditionalGeneration,
            '''question-answering''': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    SCREAMING_SNAKE_CASE_ : int = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : List[Any] = True
    SCREAMING_SNAKE_CASE_ : int = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
    def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
        _lowercase = UMTaModelTester(self )
    # Exports the model to ONNX into a temp dir; skipped on torch 1.8.0.
    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def __UpperCAmelCase ( self : int ) -> str:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                __A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
    # fp16 forward pass; only meaningful on accelerator devices.
    @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
    def __UpperCAmelCase ( self : List[Any] ) -> str:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*__A )
    # Zeroing each head-mask in turn must zero the corresponding attention weights.
    def __UpperCAmelCase ( self : List[str] ) -> int:
        _lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = config_and_inputs[0]
        _lowercase = UMTaForConditionalGeneration(__A ).eval()
        model.to(__A )
        _lowercase = {
            'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
        }
        for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
            _lowercase = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                _lowercase = torch.ones(
                    config.num_decoder_layers ,config.num_heads ,device=__A )
            _lowercase = model.generate(
                config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            _lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def __UpperCAmelCase ( self : str ) -> List[Any]:
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
    """Slow integration test: runs google/umt5-small end-to-end on a small batch
    and compares tokenized ids and generated text against recorded values."""
    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def __UpperCAmelCase ( self : int ) -> List[str]:
        _lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
        _lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
        # Multilingual prompts with sentinel tokens (<extra_id_N>) for span infilling.
        _lowercase = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        _lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
        # fmt: off
        # Golden token ids recorded from a known-good tokenizer version.
        _lowercase = torch.tensor(
            [
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(__A ,__A )
        _lowercase = model.generate(input_ids.to(__A ) )
        # Golden decoded generations recorded from a known-good model version.
        _lowercase = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        _lowercase = tokenizer.batch_decode(__A )
        self.assertEqual(__A ,__A )
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
    """Test helper that builds a tiny UMT5 config plus dummy encoder/decoder inputs.

    NOTE(review): this file appears machine-renamed — every ``__init__``
    parameter is literally named ``__A`` (a duplicate-argument syntax error in
    real Python) while the body reads names such as ``parent``/``batch_size``,
    and the attribute assignments all target the local ``_lowercase`` instead
    of ``self.<attr>``. The original upstream names must be restored before
    this tester can run; comments below describe the evident intent.
    """
    # Intended: store the ctor arguments as attributes (parent test case,
    # batch/sequence sizes, model hyper-parameters, special token ids).
    def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = encoder_seq_length
        _lowercase = decoder_seq_length
        # For common tests
        _lowercase = self.decoder_seq_length
        _lowercase = is_training
        _lowercase = use_attention_mask
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = d_ff
        _lowercase = relative_attention_num_buckets
        _lowercase = dropout_rate
        _lowercase = initializer_factor
        _lowercase = eos_token_id
        _lowercase = pad_token_id
        _lowercase = decoder_start_token_id
        _lowercase = None
        _lowercase = decoder_layers
    # Full pretrained config, for tests that need realistic defaults.
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        return TaConfig.from_pretrained('google/umt5-base' )
    # Builds the kwargs dict for a forward pass, synthesising any masks not given.
    def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
        if attention_mask is None:
            _lowercase = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            _lowercase = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            _lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
        if decoder_head_mask is None:
            _lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        if cross_attn_head_mask is None:
            _lowercase = torch.ones(
                config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    # Random token ids for encoder and decoder, clamped away from the pad id.
    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        _lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
        _lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        _lowercase = input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = self.get_config()
        _lowercase = config.num_attention_heads
        _lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
        return config, input_dict
    # Convenience wrapper used by the common test mixins.
    def __UpperCAmelCase ( self : Dict ) -> str:
        _lowercase , _lowercase = self.prepare_config_and_inputs()
        return config, inputs_dict
    # Tiny config with a reduced vocab (166) for pipeline-style tests.
    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        return TaConfig(
            vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    # Tiny config built from this tester's own hyper-parameters.
    def __UpperCAmelCase ( self : Dict ) -> Any:
        return TaConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)
    # Forward-pass smoke test: checks output shapes and past-key-value layout.
    def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
        _lowercase = UMTaModel(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
        _lowercase = model(input_ids=__A ,decoder_input_ids=__A )
        _lowercase = result.last_hidden_state
        _lowercase = result.past_key_values
        _lowercase = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(__A ) ,config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) ,4 )
    # Verifies decoding with cached past_key_values matches uncached decoding.
    # NOTE(review): several assertions compare `len(__A)` with itself — the two
    # distinct model outputs were evidently renamed to the same placeholder.
    def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
        _lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
        # first forward pass
        _lowercase = model(__A ,use_cache=__A )
        _lowercase = model(__A )
        _lowercase = model(__A ,use_cache=__A )
        self.parent.assertTrue(len(__A ) == len(__A ) )
        self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
        _lowercase , _lowercase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
        # append to next input_ids and
        _lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
        _lowercase = model(__A )['last_hidden_state']
        _lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
        # select random slice
        _lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        _lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
        _lowercase = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )
    # fp16 forward pass must not produce NaNs.
    def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
        _lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
        _lowercase = model(**__A )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """UMT5 model test suite wired into the common model/generation/pipeline mixins.

    NOTE(review): the three base classes were all renamed to ``UpperCAmelCase``;
    they are presumably ModelTesterMixin, GenerationTesterMixin and
    PipelineTesterMixin (imported above) — confirm against upstream.
    """
    # Model classes exercised by the common tests.
    SCREAMING_SNAKE_CASE_ : List[str] = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    SCREAMING_SNAKE_CASE_ : str = (
        {
            '''conversational''': UMTaForConditionalGeneration,
            '''feature-extraction''': UMTaModel,
            '''summarization''': UMTaForConditionalGeneration,
            '''text2text-generation''': UMTaForConditionalGeneration,
            '''translation''': UMTaForConditionalGeneration,
            '''question-answering''': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    SCREAMING_SNAKE_CASE_ : int = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : List[Any] = True
    SCREAMING_SNAKE_CASE_ : int = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]
    def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
        _lowercase = UMTaModelTester(self )
    # Exports the model to ONNX into a temp dir; skipped on torch 1.8.0.
    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def __UpperCAmelCase ( self : int ) -> str:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                __A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)
    # fp16 forward pass; only meaningful on accelerator devices.
    @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
    def __UpperCAmelCase ( self : List[Any] ) -> str:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*__A )
    # Zeroing each head-mask in turn must zero the corresponding attention weights.
    def __UpperCAmelCase ( self : List[str] ) -> int:
        _lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = config_and_inputs[0]
        _lowercase = UMTaForConditionalGeneration(__A ).eval()
        model.to(__A )
        _lowercase = {
            'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
        }
        for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
            _lowercase = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                _lowercase = torch.ones(
                    config.num_decoder_layers ,config.num_heads ,device=__A )
            _lowercase = model.generate(
                config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            _lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def __UpperCAmelCase ( self : str ) -> List[Any]:
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
    """Slow integration test: runs google/umt5-small end-to-end on a small batch
    and compares tokenized ids and generated text against recorded values."""
    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def __UpperCAmelCase ( self : int ) -> List[str]:
        _lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
        _lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
        # Multilingual prompts with sentinel tokens (<extra_id_N>) for span infilling.
        _lowercase = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        _lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
        # fmt: off
        # Golden token ids recorded from a known-good tokenizer version.
        _lowercase = torch.tensor(
            [
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(__A ,__A )
        _lowercase = model.generate(input_ids.to(__A ) )
        # Golden decoded generations recorded from a known-good model version.
        _lowercase = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        _lowercase = tokenizer.batch_decode(__A )
        self.assertEqual(__A ,__A )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
    """Return the sum of the decimal digits of a non-negative integer.

    Bug fixed: the accumulator was assigned to ``_lowercase`` while the loop
    read/updated the undefined names ``digit_sum`` and ``num`` — every call
    raised NameError.
    """
    digit_sum = 0
    num = snake_case__
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 100 ) -> int:
    """Project Euler 65: digit sum of the numerator of the ``snake_case__``-th
    convergent of the continued fraction of e = [2; 1,2,1, 1,4,1, 1,6,1, ...].

    Bugs fixed: the loop bound used an undefined ``max_n`` instead of the
    parameter, the numerator recurrence wrote every value to the same
    ``_lowercase`` local, and the result summed the digits of the *argument*
    rather than the computed numerator.
    """
    pre_numerator = 1
    cur_numerator = 2  # first convergent numerator (a0 = 2)
    for i in range(2 , snake_case__ + 1 ):
        temp = pre_numerator
        # Continued-fraction terms of e: every third term is 2k, the rest are 1.
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
    # Prints the Project Euler 65 answer; the `=` format spec echoes the expression.
    # NOTE(review): `solution` is not defined above under that name (the def was
    # machine-renamed to SCREAMING_SNAKE_CASE__) — verify against the original file.
    print(F"""{solution() = }""")
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue_model_parallelism.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
    ] )
class A_ ( unittest.TestCase ):
    """SageMaker integration test: launches a GLUE fine-tuning job with
    smdistributed model parallelism and asserts runtime/accuracy/loss KPIs.
    Only runs when the TEST_SAGEMAKER env var is "True"."""
    # Copy the example training script into the test workspace before each test.
    def __UpperCAmelCase ( self : Any ) -> Optional[int]:
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
        assert hasattr(self ,'env' )
    # Build a HuggingFace estimator configured for smdistributed model parallelism.
    def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
        # configuration for running training on smdistributed Model Parallel
        _lowercase = {
            'enabled': True,
            'processes_per_host': 8,
        }
        _lowercase = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        _lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        _lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            } ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
    # Export a finished job's CloudWatch metrics to CSV for debugging.
    def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
        TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    # Run training on one instance and assert the recorded KPI thresholds.
    @parameterized.expand([(1,)] )
    def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
        # create estimator
        _lowercase = self.create_estimator(__A )
        # run training
        estimator.fit()
        # result dataframe
        _lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _lowercase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A )
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger.
# NOTE(review): both of these module constants were machine-renamed to the SAME
# name `snake_case`, so the config-map assignment below clobbers the logger
# binding even though later code calls `logger.warning` — restore the original
# names (`logger` and the pretrained config archive map).
snake_case = logging.get_logger(__name__)
# Map of pretrained model identifiers to their hosted config.json URLs.
snake_case = {
    """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''instructblip_vision_model'''
def __init__( self : Tuple ,__A : Union[str, Any]=1408 ,__A : Optional[Any]=6144 ,__A : Tuple=39 ,__A : Any=16 ,__A : Any=224 ,__A : List[Any]=14 ,__A : Dict="gelu" ,__A : str=1e-6 ,__A : Optional[int]=0.0 ,__A : int=1e-10 ,__A : Tuple=True ,**__A : Any ,) -> str:
super().__init__(**__A )
_lowercase = hidden_size
_lowercase = intermediate_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = patch_size
_lowercase = image_size
_lowercase = initializer_range
_lowercase = attention_dropout
_lowercase = layer_norm_eps
_lowercase = hidden_act
_lowercase = qkv_bias
@classmethod
def __UpperCAmelCase ( cls : List[str] ,__A : Union[str, os.PathLike] ,**__A : Tuple ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowercase , _lowercase = cls.get_config_dict(__A ,**__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__A ,**__A )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''instructblip_qformer'''
def __init__( self : Optional[int] ,__A : str=3_0522 ,__A : str=768 ,__A : Tuple=12 ,__A : Tuple=12 ,__A : Any=3072 ,__A : List[str]="gelu" ,__A : List[str]=0.1 ,__A : int=0.1 ,__A : List[Any]=512 ,__A : Optional[int]=0.02 ,__A : Optional[Any]=1e-12 ,__A : Optional[int]=0 ,__A : Optional[int]="absolute" ,__A : Optional[Any]=2 ,__A : str=1408 ,**__A : str ,) -> Optional[int]:
super().__init__(pad_token_id=__A ,**__A )
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = hidden_act
_lowercase = intermediate_size
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = initializer_range
_lowercase = layer_norm_eps
_lowercase = position_embedding_type
_lowercase = cross_attention_frequency
_lowercase = encoder_hidden_size
@classmethod
def __UpperCAmelCase ( cls : Dict ,__A : Union[str, os.PathLike] ,**__A : int ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowercase , _lowercase = cls.get_config_dict(__A ,**__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
_lowercase = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__A ,**__A )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = '''instructblip'''
SCREAMING_SNAKE_CASE_ : Tuple = True
def __init__( self : Any ,__A : int=None ,__A : List[Any]=None ,__A : str=None ,__A : int=32 ,**__A : Optional[Any] ) -> Union[str, Any]:
super().__init__(**__A )
if vision_config is None:
_lowercase = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
_lowercase = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
_lowercase = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowercase = InstructBlipVisionConfig(**__A )
_lowercase = InstructBlipQFormerConfig(**__A )
_lowercase = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase = CONFIG_MAPPING[text_model_type](**__A )
_lowercase = self.text_config.tie_word_embeddings
_lowercase = self.text_config.is_encoder_decoder
_lowercase = num_query_tokens
_lowercase = self.vision_config.hidden_size
_lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase = 1.0
_lowercase = 0.02
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] ,__A : InstructBlipVisionConfig ,__A : InstructBlipQFormerConfig ,__A : PretrainedConfig ,**__A : Tuple ,) -> List[Any]:
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**__A ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = copy.deepcopy(self.__dict__ )
_lowercase = self.vision_config.to_dict()
_lowercase = self.qformer_config.to_dict()
_lowercase = self.text_config.to_dict()
_lowercase = self.__class__.model_type
return output | 67 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE_ : int = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] ,__A : List[Any]=5_0265 ,__A : str=512 ,__A : Optional[int]=8 ,__A : Any=2048 ,__A : Tuple=16 ,__A : str=8 ,__A : int=2048 ,__A : List[str]=16 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : int=True ,__A : List[Any]=True ,__A : Tuple="gelu" ,__A : Any=512 ,__A : Dict=0.1 ,__A : Tuple=0.0 ,__A : int=0.0 ,__A : int=0.02 ,__A : Dict=1 ,__A : str=False ,__A : Dict=0 ,__A : Union[str, Any]=1 ,__A : Optional[int]=2 ,__A : List[str]=2 ,**__A : Tuple ,) -> Tuple:
_lowercase = vocab_size
_lowercase = max_position_embeddings
_lowercase = d_model
_lowercase = encoder_ffn_dim
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,forced_eos_token_id=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
_lowercase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowercase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super().outputs
else:
_lowercase = super(__A ,self ).outputs
if self.use_past:
_lowercase , _lowercase = self.num_layers
for i in range(__A ):
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
_lowercase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self : Optional[int] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
# Generate decoder inputs
_lowercase = seq_length if not self.use_past else 1
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
_lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowercase = dict(**__A ,**__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
_lowercase = common_inputs['decoder_input_ids'].shape[1]
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = decoder_seq_length + 3
_lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowercase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__A ,__A )] ,dim=1 )
_lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowercase , _lowercase = self.num_layers
_lowercase = min(__A ,__A )
_lowercase = max(__A ,__A ) - min_num_layers
_lowercase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
_lowercase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__A ,__A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def __UpperCAmelCase ( self : List[Any] ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,__A ,__A ,__A ,__A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowercase , _lowercase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowercase = seqlen + 2
_lowercase , _lowercase = self.num_layers
_lowercase , _lowercase = self.num_attention_heads
_lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowercase = common_inputs['attention_mask'].dtype
_lowercase = torch.cat(
[common_inputs['attention_mask'], torch.ones(__A ,__A ,dtype=__A )] ,dim=1 )
_lowercase = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def __UpperCAmelCase ( self : Any ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowercase = tokenizer.num_special_tokens_to_add(__A )
_lowercase = compute_effective_axis_dimension(
__A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowercase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowercase = dict(tokenizer(__A ,return_tensors=__A ) )
return common_inputs
def __UpperCAmelCase ( self : Dict ,__A : PreTrainedTokenizer ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
elif self.task == "causal-lm":
_lowercase = self._generate_dummy_inputs_for_causal_lm(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
else:
_lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
return common_inputs
def __UpperCAmelCase ( self : List[str] ,__A : Dict ,__A : Any ,__A : List[Any] ,__A : Tuple ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_lowercase = super()._flatten_past_key_values_(__A ,__A ,__A ,__A )
else:
_lowercase = super(__A ,self )._flatten_past_key_values_(
__A ,__A ,__A ,__A ) | 67 | 1 |
from __future__ import annotations
from typing import Any
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[Any] ) -> None:
create_state_space_tree(snake_case__ , [] , 0 )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :list[Any] , snake_case__ :list[Any] , snake_case__ :int ) -> None:
if index == len(snake_case__ ):
print(snake_case__ )
return
create_state_space_tree(snake_case__ , snake_case__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(snake_case__ , snake_case__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
snake_case = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq) | 67 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : Any ) -> str:
torch.manual_seed(0 )
_lowercase = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase = self.dummy_uncond_unet
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ).images
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=2 ,generator=__A ,output_type='numpy' ,return_dict=__A )[0]
_lowercase = image[0, -3:, -3:, -1]
_lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
_lowercase = 'google/ncsnpp-celebahq-256'
_lowercase = UNetaDModel.from_pretrained(__A )
_lowercase = KarrasVeScheduler()
_lowercase = KarrasVePipeline(unet=__A ,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowercase = torch.manual_seed(0 )
_lowercase = pipe(num_inference_steps=20 ,generator=__A ,output_type='numpy' ).images
_lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 67 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class A_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] ,__A : List[Any] ,__A : Union[str, Any]=13 ,__A : int=7 ,__A : Optional[int]=True ,__A : int=True ,__A : Optional[int]=True ,__A : Dict=True ,__A : Tuple=99 ,__A : Any=32 ,__A : Union[str, Any]=5 ,__A : str=4 ,__A : List[Any]=37 ,__A : List[Any]="gelu" ,__A : List[Any]=0.1 ,__A : int=0.1 ,__A : Union[str, Any]=512 ,__A : str=16 ,__A : Optional[int]=2 ,__A : Optional[Any]=0.02 ,__A : List[Any]=4 ,) -> List[str]:
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_attention_mask
_lowercase = use_token_type_ids
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = type_sequence_label_size
_lowercase = initializer_range
_lowercase = num_choices
def __UpperCAmelCase ( self : List[str] ) -> Any:
_lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowercase = None
if self.use_attention_mask:
_lowercase = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase = None
if self.use_token_type_ids:
_lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowercase = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__A ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : List[Any] ) -> Any:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase = config_and_inputs
_lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : List[Any] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = FlaxRoFormerModelTester(self )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
for model_class_name in self.all_model_classes:
_lowercase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' ,from_pt=__A )
_lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> int:
_lowercase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_lowercase = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowercase = model(__A )[0]
_lowercase = 5_0000
_lowercase = (1, 6, vocab_size)
self.assertEqual(output.shape ,__A )
_lowercase = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] ,__A ,atol=1e-4 ) ) | 67 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str , snake_case__ :str ) -> list:
_lowercase = len(snake_case__ )
_lowercase = []
for i in range(len(snake_case__ ) - pat_len + 1 ):
_lowercase = True
for j in range(snake_case__ ):
if s[i + j] != pattern[j]:
_lowercase = False
break
if match_found:
position.append(snake_case__ )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 67 | 1 |
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Sequence[int] | None = None ) -> int:
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
_lowercase = nums[0]
for i in range(1 , len(snake_case__ ) ):
_lowercase = nums[i]
_lowercase = max(snake_case__ , ans + num , snake_case__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
snake_case = int(input("""Enter number of elements : """).strip())
snake_case = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array)) | 67 |
from typing import Any
import numpy as np
def SCREAMING_SNAKE_CASE__ ( snake_case__ :np.ndarray ) -> bool:
return np.array_equal(snake_case__ , matrix.conjugate().T )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :np.ndarray , snake_case__ :np.ndarray ) -> Any:
_lowercase = v.conjugate().T
_lowercase = v_star.dot(snake_case__ )
assert isinstance(snake_case__ , np.ndarray )
return (v_star_dot.dot(snake_case__ )) / (v_star.dot(snake_case__ ))
def SCREAMING_SNAKE_CASE__ ( ) -> None:
_lowercase = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
_lowercase = np.array([[1], [2], [3]] )
assert is_hermitian(snake_case__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(snake_case__ , snake_case__ ) )
_lowercase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(snake_case__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(snake_case__ , snake_case__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 67 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
_lowercase = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
return Dataset.from_dict(__A )
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
_lowercase = self._create_example_records()
_lowercase = Dataset.from_list(__A )
self.assertListEqual(dset.column_names ,['col_1', 'col_2'] )
for i, r in enumerate(__A ):
self.assertDictEqual(__A ,example_records[i] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
_lowercase = self._create_example_records()
_lowercase = Dataset.from_list(__A )
_lowercase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info ,dset_from_dict.info )
def __UpperCAmelCase ( self : int ) -> Tuple: # checks what happens with missing columns
_lowercase = [{'col_1': 1}, {'col_2': 'x'}]
_lowercase = Dataset.from_list(__A )
self.assertDictEqual(dset[0] ,{'col_1': 1} )
self.assertDictEqual(dset[1] ,{'col_1': None} ) # NB: first record is used for columns
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]: # checks if the type can be inferred from the second record
_lowercase = [{'col_1': []}, {'col_1': [1, 2]}]
_lowercase = Dataset.from_list(__A )
self.assertEqual(dset.info.features['col_1'] ,Sequence(Value('int64' ) ) )
def __UpperCAmelCase ( self : List[str] ) -> str:
_lowercase = Dataset.from_list([] )
self.assertEqual(len(__A ) ,0 )
self.assertListEqual(dset.column_names ,[] ) | 67 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
    """Fixture/helper that holds DETA image-processor settings, builds the kwargs
    dict for the processor under test, and computes the (height, width) a
    processed image is expected to have.

    NOTE(review): names restored throughout — the mangled version repeated the
    parameter name ``__A`` in ``__init__`` (a SyntaxError), bound every
    attribute to a throwaway ``_lowercase`` local while the methods read the
    real attribute names, and its ``max(...)`` lambdas referenced an undefined
    name ``item``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        # Avoid mutable default arguments; the historical defaults are applied here.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        # Named ``prepare_image_processor_dict`` because the test class below
        # calls it under exactly this name.
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
            'do_rescale': self.do_rescale,
            'rescale_factor': self.rescale_factor,
            'do_pad': self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) the processor should produce.

        For a single image the shortest edge is scaled to
        ``self.size['shortest_edge']`` (aspect ratio preserved); for a batch the
        result is the per-dimension maximum over the batch.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                # PIL reports (width, height)
                w, h = image.size
            else:
                # array/tensor inputs are channels-first: (C, H, W)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            # Padded batch shape is the element-wise maximum over the batch.
            # (The mangled lambdas ``lambda __A : item[0]`` referenced the
            # undefined name ``item`` and would raise NameError.)
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


# Alias under the upstream name so the test class below can instantiate it.
DetaImageProcessingTester = A_
@require_torch
@require_vision
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = DetaImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = DetaImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : List[Any] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A ,'image_mean' ) )
self.assertTrue(hasattr(__A ,'image_std' ) )
self.assertTrue(hasattr(__A ,'do_normalize' ) )
self.assertTrue(hasattr(__A ,'do_resize' ) )
self.assertTrue(hasattr(__A ,'do_rescale' ) )
self.assertTrue(hasattr(__A ,'do_pad' ) )
self.assertTrue(hasattr(__A ,'size' ) )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,__A )
    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        # Intentionally empty: this hook from the shared test mixin is not
        # exercised for this image processor.
        pass
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A ,Image.Image )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,np.ndarray )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__A ,torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A ,torch.Tensor )
# Test not batched input
_lowercase = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
_lowercase = image_processing(__A ,return_tensors='pt' ).pixel_values
_lowercase , _lowercase = self.image_processor_tester.get_expected_values(__A ,batched=__A )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
# prepare image and target
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'image_id': 3_9769, 'annotations': target}
# encode them
_lowercase = DetaImageProcessor()
_lowercase = image_processing(images=__A ,annotations=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
_lowercase = json.loads(f.read() )
_lowercase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
_lowercase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowercase = DetaImageProcessor(format='coco_panoptic' )
_lowercase = image_processing(images=__A ,annotations=__A ,masks_path=__A ,return_tensors='pt' )
# verify pixel values
_lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,__A )
_lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__A ,atol=1e-4 ) )
# verify area
_lowercase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__A ) )
# verify boxes
_lowercase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__A )
_lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__A ,atol=1e-3 ) )
# verify image_id
_lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__A ) )
# verify is_crowd
_lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__A ) )
# verify class_labels
_lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__A ) )
# verify masks
_lowercase = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__A )
# verify orig_size
_lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__A ) )
# verify size
_lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__A ) ) | 67 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
snake_case = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = '''maskformer'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''hidden_size''': '''mask_feature_size'''}
SCREAMING_SNAKE_CASE_ : List[Any] = ['''resnet''', '''swin''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''detr''']
def __init__( self : Any ,__A : int = 256 ,__A : int = 256 ,__A : float = 0.1 ,__A : bool = False ,__A : Optional[Dict] = None ,__A : Optional[Dict] = None ,__A : float = 0.02 ,__A : float = 1.0 ,__A : float = 1.0 ,__A : float = 1.0 ,__A : float = 20.0 ,__A : Optional[bool] = None ,**__A : Tuple ,) -> Dict:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_lowercase = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,)
if isinstance(__A ,__A ):
_lowercase = backbone_config.pop('model_type' )
_lowercase = CONFIG_MAPPING[backbone_model_type]
_lowercase = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
F"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_lowercase = DetrConfig()
else:
# verify that the decoder is supported
_lowercase = (
decoder_config.pop('model_type' ) if isinstance(__A ,__A ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"""Transformer Decoder {decoder_type} not supported, please use one of"""
F""" {",".join(self.decoders_supported )}""" )
if isinstance(__A ,__A ):
_lowercase = CONFIG_MAPPING[decoder_type]
_lowercase = config_class.from_dict(__A )
_lowercase = backbone_config
_lowercase = decoder_config
# main feature dimension for the model
_lowercase = fpn_feature_size
_lowercase = mask_feature_size
# initializer
_lowercase = init_std
_lowercase = init_xavier_std
# Hungarian matcher && loss
_lowercase = cross_entropy_weight
_lowercase = dice_weight
_lowercase = mask_weight
_lowercase = use_auxiliary_loss
_lowercase = no_object_weight
_lowercase = output_auxiliary_logits
_lowercase = self.decoder_config.encoder_attention_heads
_lowercase = self.decoder_config.num_hidden_layers
super().__init__(**__A )
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] ,__A : PretrainedConfig ,__A : PretrainedConfig ,**__A : Union[str, Any] ) -> Optional[Any]:
return cls(
backbone_config=__A ,decoder_config=__A ,**__A ,)
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict[str, any]:
_lowercase = copy.deepcopy(self.__dict__ )
_lowercase = self.backbone_config.to_dict()
_lowercase = self.decoder_config.to_dict()
_lowercase = self.__class__.model_type
return output | 67 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
snake_case = False
snake_case = False
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Namespace ) -> Tuple:
return TrainCommand(snake_case__ )
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def __UpperCAmelCase ( __A : ArgumentParser ) -> List[Any]:
_lowercase = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' ,type=__A ,required=__A ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
train_parser.add_argument(
'--column_label' ,type=__A ,default=0 ,help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' ,type=__A ,default=1 ,help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' ,type=__A ,default=2 ,help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' ,type=__A ,default='' ,help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' ,type=__A ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
train_parser.add_argument('--output' ,type=__A ,default='./' ,help='path to saved the trained model.' )
train_parser.add_argument(
'--task' ,type=__A ,default='text_classification' ,help='Task to train the model on.' )
train_parser.add_argument(
'--model' ,type=__A ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' ,type=__A ,default=32 ,help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' ,type=__A ,default=64 ,help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' ,type=__A ,default=3e-5 ,help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' ,type=__A ,default=1e-08 ,help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self : Optional[Any] ,__A : Namespace ) -> Tuple:
_lowercase = logging.get_logger('transformers-cli/training' )
_lowercase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output ,exist_ok=__A )
_lowercase = args.output
_lowercase = args.column_label
_lowercase = args.column_text
_lowercase = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_lowercase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_lowercase = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_lowercase = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_lowercase = args.validation_split
_lowercase = args.train_batch_size
_lowercase = args.valid_batch_size
_lowercase = args.learning_rate
_lowercase = args.adam_epsilon
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
raise NotImplementedError
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 67 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''transfo-xl'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''mems''']
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] ,__A : Union[str, Any]=26_7735 ,__A : List[Any]=[2_0000, 4_0000, 20_0000] ,__A : Dict=1024 ,__A : str=1024 ,__A : Dict=16 ,__A : int=64 ,__A : Dict=4096 ,__A : List[Any]=4 ,__A : Optional[int]=False ,__A : Union[str, Any]=18 ,__A : Tuple=1600 ,__A : str=1000 ,__A : Dict=True ,__A : Dict=True ,__A : int=0 ,__A : Optional[int]=-1 ,__A : int=True ,__A : List[str]=0.1 ,__A : Optional[int]=0.0 ,__A : str=True ,__A : Tuple="normal" ,__A : Union[str, Any]=0.01 ,__A : Tuple=0.01 ,__A : Any=0.02 ,__A : Union[str, Any]=1e-5 ,__A : List[Any]=0 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = []
self.cutoffs.extend(__A )
if proj_share_all_but_first:
_lowercase = [False] + [True] * len(self.cutoffs )
else:
_lowercase = [False] + [False] * len(self.cutoffs )
_lowercase = d_model
_lowercase = d_embed
_lowercase = d_head
_lowercase = d_inner
_lowercase = div_val
_lowercase = pre_lnorm
_lowercase = n_layer
_lowercase = n_head
_lowercase = mem_len
_lowercase = same_length
_lowercase = attn_type
_lowercase = clamp_len
_lowercase = sample_softmax
_lowercase = adaptive
_lowercase = dropout
_lowercase = dropatt
_lowercase = untie_r
_lowercase = init
_lowercase = init_range
_lowercase = proj_init_std
_lowercase = init_std
_lowercase = layer_norm_epsilon
super().__init__(eos_token_id=__A ,**__A )
@property
def __UpperCAmelCase ( self : str ) -> Optional[int]:
# Message copied from Transformer-XL documentation
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def __UpperCAmelCase ( self : Any ,__A : Dict ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 67 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
snake_case = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : List[Any] ,*__A : List[str] ,**__A : Optional[int] ) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' ,__A ,)
super().__init__(*__A ,**__A ) | 67 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    """Builds a tiny TransfoXL config plus random inputs and checks TF model outputs.

    Driven by the test case below, which instantiates it as
    ``TFTransfoXLModelTester(self)`` and calls the ``prepare_*`` /
    ``create_and_check_*`` methods.
    """

    def __init__(self, parent):
        # `parent` is the unittest.TestCase running the checks (supplies assert* methods).
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        # Total attendable length: current tokens plus cached memory.
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids_1, input_ids_2, lm_labels)`` for the checks below."""
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        # Seed both the Python and TF RNGs so model initialization is reproducible.
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        # Second forward pass consumes the memories produced by the first.
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        lm_logits_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class A_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """TF TransfoXL model tests: common mixin checks plus pipeline checks."""

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    # TransfoXL's generation path is not exercised by the common generative tests.
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    # NOTE(review): the four flag names below follow the upstream transformers test
    # conventions — confirm against the mixin's attribute names.
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
def __UpperCAmelCase ( self : Union[str, Any] ,__A : Any ,__A : Union[str, Any] ,__A : Any ,__A : Tuple ,__A : str ) -> int:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __UpperCAmelCase ( self : Any ) -> List[str]:
_lowercase = TFTransfoXLModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,d_embed=37 )
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
self.model_tester.set_seed()
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__A )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
self.model_tester.set_seed()
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__A )
def __UpperCAmelCase ( self : str ) -> List[str]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__A )
def __UpperCAmelCase ( self : Any ) -> str:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowercase = model.get_output_embeddings()
assert isinstance(__A ,tf.keras.layers.Layer )
_lowercase = model.get_bias()
assert name is None
else:
_lowercase = model.get_output_embeddings()
assert x is None
_lowercase = model.get_bias()
assert name is None
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = TFTransfoXLModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
pass
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration test: generation with the pretrained transfo-xl-wt103 LM head."""

    @unittest.skip('Skip test until #12651 is resolved.' )
    @slow
    def __UpperCAmelCase ( self : Any ) -> Dict:
        # Loads the pretrained checkpoint, generates up to 200 tokens from the
        # prompt below, and compares against the expected token ids verbatim.
        # NOTE(review): `model`/`output_ids` and the generate kwargs are unbound
        # or garbled after the mechanical rename -- restore before running.
        _lowercase = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
        # fmt: off
        # Prompt token ids for the passage described in the comments below.
        _lowercase = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        # Expected generated ids: the prompt followed by the continuation below.
        _lowercase = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        _lowercase = model.generate(__A ,max_length=200 ,do_sample=__A )
        self.assertListEqual(output_ids[0].numpy().tolist() ,__A ) | 67 |
# UniDiffuser package exports: fall back to dummy placeholder objects when
# torch or transformers is unavailable so importing the package never fails.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder classes that raise a helpful error when actually used.
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    # Real implementations, available only with both torch and transformers.
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 1 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
snake_case = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 2048-bit
1_4: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 3072-bit
1_5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 4096-bit
1_6: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 6144-bit
1_7: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 8192-bit
1_8: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
}
class A_ :
    """Diffie-Hellman key exchange over the RFC 3526 MODP groups in the module-level `primes` table."""
    # NOTE(review): parameters throughout this class were mechanically renamed
    # to `__A` (duplicate-argument SyntaxError) and every method shares the name
    # `__UpperCAmelCase`; bodies reference the intended pre-rename identifiers
    # (group/key/index/...), which are unbound as written.

    def __init__( self : Union[str, Any] ,__A : int = 14 ) -> None:
        # Select the group parameters and draw a random 256-bit private key.
        if group not in primes:
            raise ValueError('Unsupported Group' )
        _lowercase = primes[group]['prime']
        _lowercase = primes[group]['generator']
        _lowercase = int(hexlify(urandom(32 ) ) ,base=16 )

    def __UpperCAmelCase ( self : List[Any] ) -> str:
        # Private key as a hex string without the '0x' prefix.
        return hex(self.__private_key )[2:]

    def __UpperCAmelCase ( self : Any ) -> str:
        # Public key: generator ** private_key mod prime, hex-encoded.
        _lowercase = pow(self.generator ,self.__private_key ,self.prime )
        return hex(__A )[2:]

    def __UpperCAmelCase ( self : str ,__A : int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(__A ,(self.prime - 1) // 2 ,self.prime ) == 1
        )

    def __UpperCAmelCase ( self : Dict ,__A : str ) -> str:
        # Derive the shared secret from a remote public key (hex) and return
        # its SHA-256 hex digest; rejects invalid public keys.
        _lowercase = int(__A ,base=16 )
        if not self.is_valid_public_key(__A ):
            raise ValueError('Invalid public key' )
        _lowercase = pow(__A ,self.__private_key ,self.prime )
        return shaaaa(str(__A ).encode() ).hexdigest()

    @staticmethod
    def __UpperCAmelCase ( __A : int ,__A : int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(__A ,(prime - 1) // 2 ,__A ) == 1
        )

    @staticmethod
    def __UpperCAmelCase ( __A : str ,__A : str ,__A : int = 14 ) -> str:
        # One-shot static variant: derive the shared key from a local private
        # key and a remote public key (both hex strings) for the given group.
        _lowercase = int(__A ,base=16 )
        _lowercase = int(__A ,base=16 )
        _lowercase = primes[group]['prime']
        if not DiffieHellman.is_valid_public_key_static(__A ,__A ):
            raise ValueError('Invalid public key' )
        _lowercase = pow(__A ,__A ,__A )
        return shaaaa(str(__A ).encode() ).hexdigest()
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 67 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( UpperCAmelCase ):
    """Configuration for Transformer-XL, including adaptive-softmax cutoffs and memory length."""

    SCREAMING_SNAKE_CASE_ : int = '''transfo-xl'''
    SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''mems''']
    # Aliases mapping common config attribute names onto Transfo-XL's own names.
    SCREAMING_SNAKE_CASE_ : Optional[Any] = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self : Optional[Any] ,__A : Union[str, Any]=26_7735 ,__A : List[Any]=[2_0000, 4_0000, 20_0000] ,__A : Dict=1024 ,__A : str=1024 ,__A : Dict=16 ,__A : int=64 ,__A : Dict=4096 ,__A : List[Any]=4 ,__A : Optional[int]=False ,__A : Union[str, Any]=18 ,__A : Tuple=1600 ,__A : str=1000 ,__A : Dict=True ,__A : Dict=True ,__A : int=0 ,__A : Optional[int]=-1 ,__A : int=True ,__A : List[str]=0.1 ,__A : Optional[int]=0.0 ,__A : str=True ,__A : Tuple="normal" ,__A : Union[str, Any]=0.01 ,__A : Tuple=0.01 ,__A : Any=0.02 ,__A : Union[str, Any]=1e-5 ,__A : List[Any]=0 ,**__A : str ,) -> List[Any]:
        # NOTE(review): parameters were mechanically renamed to `__A`
        # (duplicate-argument SyntaxError); assignments below reference the
        # intended original parameter names, which are unbound as written.
        _lowercase = vocab_size
        _lowercase = []
        self.cutoffs.extend(__A )
        # Output-projection sharing across adaptive-softmax clusters.
        if proj_share_all_but_first:
            _lowercase = [False] + [True] * len(self.cutoffs )
        else:
            _lowercase = [False] + [False] * len(self.cutoffs )
        _lowercase = d_model
        _lowercase = d_embed
        _lowercase = d_head
        _lowercase = d_inner
        _lowercase = div_val
        _lowercase = pre_lnorm
        _lowercase = n_layer
        _lowercase = n_head
        _lowercase = mem_len
        _lowercase = same_length
        _lowercase = attn_type
        _lowercase = clamp_len
        _lowercase = sample_softmax
        _lowercase = adaptive
        _lowercase = dropout
        _lowercase = dropatt
        _lowercase = untie_r
        _lowercase = init
        _lowercase = init_range
        _lowercase = proj_init_std
        _lowercase = init_std
        _lowercase = layer_norm_epsilon
        super().__init__(eos_token_id=__A ,**__A )

    @property
    def __UpperCAmelCase ( self : str ) -> Optional[int]:
        # Message copied from Transformer-XL documentation
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    # NOTE(review): `max_position_embeddings` must be the property name above;
    # the decorator references a name lost during the mechanical rename.
    @max_position_embeddings.setter
    def __UpperCAmelCase ( self : Any ,__A : Dict ) -> Optional[Any]:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) | 67 | 1 |
"""Deprecation shim for the outdated `image_to_image.py` script."""
import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401

# Emitted once at import time to steer users toward the supported API.
_DEPRECATION_MESSAGE = (
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
warnings.warn(_DEPRECATION_MESSAGE)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class A_ ( UpperCAmelCase ):
    """Configuration for DPR models: BERT-style hyper-parameters plus `projection_dim`."""

    SCREAMING_SNAKE_CASE_ : Optional[int] = '''dpr'''

    def __init__( self : int ,__A : Union[str, Any]=3_0522 ,__A : Optional[int]=768 ,__A : int=12 ,__A : List[Any]=12 ,__A : Optional[Any]=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : List[Any]=0.1 ,__A : str=512 ,__A : List[str]=2 ,__A : Tuple=0.02 ,__A : Tuple=1e-12 ,__A : List[Any]=0 ,__A : List[str]="absolute" ,__A : int = 0 ,**__A : int ,) -> Tuple:
        # NOTE(review): parameters were mechanically renamed to `__A`
        # (duplicate-argument SyntaxError); assignments below reference the
        # intended original parameter names, which are unbound as written.
        super().__init__(pad_token_id=__A ,**__A )
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = hidden_act
        _lowercase = intermediate_size
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = max_position_embeddings
        _lowercase = type_vocab_size
        _lowercase = initializer_range
        _lowercase = layer_norm_eps
        _lowercase = projection_dim
        _lowercase = position_embedding_type | 67 | 1 |
from copy import deepcopy
class A_ :
    """Fenwick (binary indexed) tree supporting point updates and prefix-sum queries."""
    # NOTE(review): parameters throughout were mechanically renamed to `__A`
    # (duplicate-argument SyntaxError) and all methods share one name; bodies
    # reference the intended pre-rename identifiers (arr/size/index/...).

    def __init__( self : Optional[int] ,__A : list[int] | None = None ,__A : int | None = None ) -> None:
        # Build either an empty tree of `size` zeros or one from an array.
        if arr is None and size is not None:
            _lowercase = size
            _lowercase = [0] * size
        elif arr is not None:
            self.init(__A )
        else:
            raise ValueError('Either arr or size must be specified' )

    def __UpperCAmelCase ( self : Optional[int] ,__A : list[int] ) -> None:
        # O(n) construction: propagate each node into its parent once.
        _lowercase = len(__A )
        _lowercase = deepcopy(__A )
        for i in range(1 ,self.size ):
            _lowercase = self.next_(__A )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def __UpperCAmelCase ( self : List[Any] ) -> list[int]:
        # Reconstruct and return the underlying array by undoing construction.
        _lowercase = self.tree[:]
        for i in range(self.size - 1 ,0 ,-1 ):
            _lowercase = self.next_(__A )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def __UpperCAmelCase ( __A : int ) -> int:
        # next_: index plus its lowest set bit.
        return index + (index & (-index))

    @staticmethod
    def __UpperCAmelCase ( __A : int ) -> int:
        # prev: index minus its lowest set bit.
        return index - (index & (-index))

    def __UpperCAmelCase ( self : Any ,__A : int ,__A : int ) -> None:
        # add(index, value): O(log n) point update; slot 0 is held separately.
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            _lowercase = self.next_(__A )

    def __UpperCAmelCase ( self : List[Any] ,__A : int ,__A : int ) -> None:
        # update(index, value): set element via add(index, value - current).
        self.add(__A ,value - self.get(__A ) )

    def __UpperCAmelCase ( self : List[str] ,__A : int ) -> int:
        # prefix(right): sum over the half-open range [0, right).
        if right == 0:
            return 0
        _lowercase = self.tree[0]
        right -= 1 # make right inclusive
        while right > 0:
            result += self.tree[right]
            _lowercase = self.prev(__A )
        return result

    def __UpperCAmelCase ( self : List[str] ,__A : int ,__A : int ) -> int:
        # query(left, right): sum over [left, right) as a prefix difference.
        return self.prefix(__A ) - self.prefix(__A )

    def __UpperCAmelCase ( self : Any ,__A : int ) -> int:
        # get(index): single element value.
        return self.query(__A ,index + 1 )

    def __UpperCAmelCase ( self : str ,__A : int ) -> int:
        # rank_query(value): largest index whose prefix sum is <= value,
        # found by binary lifting over powers of two; -1 if none.
        value -= self.tree[0]
        if value < 0:
            return -1
        _lowercase = 1 # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        _lowercase = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 67 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
snake_case = Lock()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :Any , snake_case__ :Dict , snake_case__ :Optional[int] , snake_case__ :List[str] ) -> Optional[Any]:
    # Worker body for odd-even transposition sort: on alternating phases,
    # exchange the held value with the left/right neighbour over pipes, then
    # report the final value through the result pipe.
    # NOTE(review): all parameters were renamed to `snake_case__` (duplicate
    # argument); the body references the intended names position/value/
    # l_send/r_send/lr_cv/rr_cv/result_pipe, which are unbound as written.
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(snake_case__ )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            _lowercase = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            _lowercase = min(snake_case__ , snake_case__ )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(snake_case__ )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            _lowercase = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            _lowercase = max(snake_case__ , snake_case__ )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Dict:
    # Orchestrates the parallel odd-even transposition sort: one process per
    # element, neighbours linked by pipes, and one result pipe per element.
    # NOTE(review): the body references `arr`, `process_array_`, `result_pipe`,
    # `temp_*`, etc., which were lost in the mechanical rename of locals.
    _lowercase = []
    _lowercase = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    _lowercase = Pipe()
    _lowercase = Pipe()
    process_array_.append(
        Process(
            target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    _lowercase = temp_rs
    _lowercase = temp_rr
    for i in range(1 , len(snake_case__ ) - 1 ):
        _lowercase = Pipe()
        _lowercase = Pipe()
        process_array_.append(
            Process(
                target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        _lowercase = temp_rs
        _lowercase = temp_rr
    process_array_.append(
        Process(
            target=snake_case__ , args=(
                len(snake_case__ ) - 1,
                arr[len(snake_case__ ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(snake_case__ ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(snake_case__ ) ):
        _lowercase = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
    # Demo entry point: sorts the reversed list 10..1 and prints it before
    # and after. NOTE(review): `snake_case__` is unbound here; the intended
    # local (the list) was lost in the mechanical rename.
    _lowercase = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*snake_case__ )
    _lowercase = odd_even_transposition(snake_case__ )
    print('Sorted List\n' )
    print(*snake_case__ )


if __name__ == "__main__":
    main() | 67 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A_ ( UpperCAmelCase ):
    """RoBERTa-style configuration for CamemBERT models."""

    SCREAMING_SNAKE_CASE_ : Optional[Any] = '''camembert'''

    def __init__( self : Optional[int] ,__A : Optional[int]=3_0522 ,__A : List[Any]=768 ,__A : Optional[int]=12 ,__A : int=12 ,__A : Dict=3072 ,__A : Union[str, Any]="gelu" ,__A : Union[str, Any]=0.1 ,__A : Any=0.1 ,__A : int=512 ,__A : Union[str, Any]=2 ,__A : Union[str, Any]=0.02 ,__A : Union[str, Any]=1e-12 ,__A : Dict=1 ,__A : List[str]=0 ,__A : int=2 ,__A : Tuple="absolute" ,__A : Union[str, Any]=True ,__A : Optional[Any]=None ,**__A : Optional[Any] ,) -> Optional[int]:
        # NOTE(review): parameters were mechanically renamed to `__A`
        # (duplicate-argument SyntaxError); assignments below reference the
        # intended original parameter names, which are unbound as written.
        super().__init__(pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,**__A )
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = hidden_act
        _lowercase = intermediate_size
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = max_position_embeddings
        _lowercase = type_vocab_size
        _lowercase = initializer_range
        _lowercase = layer_norm_eps
        _lowercase = position_embedding_type
        _lowercase = use_cache
        _lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis between batch
        # and sequence. NOTE(review): `dynamic_axis` is the local lost in the
        # mechanical rename of `_lowercase`.
        if self.task == "multiple-choice":
            _lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            _lowercase = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] ) | 67 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A_ ( UpperCAmelCase ):
    """Configuration for BigBird, including block-sparse attention settings."""

    SCREAMING_SNAKE_CASE_ : Optional[int] = '''big_bird'''

    def __init__( self : str ,__A : Union[str, Any]=5_0358 ,__A : Any=768 ,__A : List[str]=12 ,__A : Union[str, Any]=12 ,__A : int=3072 ,__A : Tuple="gelu_new" ,__A : Any=0.1 ,__A : Optional[Any]=0.1 ,__A : Tuple=4096 ,__A : int=2 ,__A : Union[str, Any]=0.02 ,__A : Optional[int]=1e-12 ,__A : List[str]=True ,__A : List[Any]=0 ,__A : Optional[Any]=1 ,__A : Optional[int]=2 ,__A : Optional[int]=66 ,__A : Tuple="block_sparse" ,__A : Optional[int]=True ,__A : Optional[int]=False ,__A : Tuple=64 ,__A : str=3 ,__A : Optional[int]=None ,**__A : Dict ,) -> Union[str, Any]:
        # NOTE(review): parameters were mechanically renamed to `__A`
        # (duplicate-argument SyntaxError); assignments below reference the
        # intended original parameter names, which are unbound as written.
        super().__init__(
            pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,sep_token_id=__A ,**__A ,)
        _lowercase = vocab_size
        _lowercase = max_position_embeddings
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = intermediate_size
        _lowercase = hidden_act
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = initializer_range
        _lowercase = type_vocab_size
        _lowercase = layer_norm_eps
        _lowercase = use_cache
        _lowercase = rescale_embeddings
        # Block-sparse attention settings ("block_sparse" vs full attention).
        _lowercase = attention_type
        _lowercase = use_bias
        _lowercase = block_size
        _lowercase = num_random_blocks
        _lowercase = classifier_dropout
class A_ ( UpperCAmelCase ):
    """ONNX export configuration: declares the dynamic axes of the model inputs."""

    @property
    def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis between batch
        # and sequence. NOTE(review): `dynamic_axis` is the local lost in the
        # mechanical rename of `_lowercase`.
        if self.task == "multiple-choice":
            _lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            _lowercase = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] ) | 67 | 1 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
"""simple docstring"""
def __init__( self : str ,__A : Optional[int] ,__A : str=2 ,__A : List[str]=3 ,__A : List[Any]=4 ,__A : List[Any]=2 ,__A : List[str]=7 ,__A : Dict=True ,__A : Union[str, Any]=True ,__A : Optional[Any]=True ,__A : Optional[Any]=True ,__A : int=99 ,__A : str=36 ,__A : Tuple=2 ,__A : int=4 ,__A : int=37 ,__A : Union[str, Any]="gelu" ,__A : str=0.1 ,__A : Union[str, Any]=0.1 ,__A : Any=512 ,__A : Optional[int]=16 ,__A : List[Any]=2 ,__A : int=0.02 ,__A : Union[str, Any]=6 ,__A : Dict=6 ,__A : str=3 ,__A : Optional[Any]=4 ,__A : str=None ,__A : str=1000 ,) -> Dict:
_lowercase = parent
_lowercase = batch_size
_lowercase = num_channels
_lowercase = image_size
_lowercase = patch_size
_lowercase = is_training
_lowercase = use_input_mask
_lowercase = use_token_type_ids
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = type_sequence_label_size
_lowercase = initializer_range
_lowercase = coordinate_size
_lowercase = shape_size
_lowercase = num_labels
_lowercase = num_choices
_lowercase = scope
_lowercase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowercase = text_seq_length
_lowercase = (image_size // patch_size) ** 2 + 1
_lowercase = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
_lowercase = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
_lowercase = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
_lowercase = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase = bbox[i, j, 3]
_lowercase = bbox[i, j, 1]
_lowercase = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase = bbox[i, j, 2]
_lowercase = bbox[i, j, 0]
_lowercase = tmp_coordinate
_lowercase = tf.constant(__A )
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = None
if self.use_input_mask:
_lowercase = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowercase = None
if self.use_token_type_ids:
_lowercase = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
_lowercase = None
_lowercase = None
if self.use_labels:
_lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowercase = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
_lowercase = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self : Optional[int] ,__A : List[Any] ,__A : int ,__A : List[Any] ,__A : Tuple ,__A : Optional[Any] ,__A : List[Any] ) -> Optional[Any]:
_lowercase = TFLayoutLMvaModel(config=__A )
# text + image
_lowercase = model(__A ,pixel_values=__A ,training=__A )
_lowercase = model(
__A ,bbox=__A ,pixel_values=__A ,attention_mask=__A ,token_type_ids=__A ,training=__A ,)
_lowercase = model(__A ,bbox=__A ,pixel_values=__A ,training=__A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowercase = model(__A ,training=__A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowercase = model({'pixel_values': pixel_values} ,training=__A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : int ,__A : Optional[Any] ,__A : List[str] ,__A : List[str] ,__A : List[Any] ,__A : int ,__A : Union[str, Any] ,__A : List[str] ) -> Tuple:
_lowercase = self.num_labels
_lowercase = TFLayoutLMvaForSequenceClassification(config=__A )
_lowercase = model(
__A ,bbox=__A ,pixel_values=__A ,attention_mask=__A ,token_type_ids=__A ,labels=__A ,training=__A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Any ,__A : Any ,__A : Optional[Any] ,__A : Optional[int] ,__A : Dict ,__A : List[str] ,__A : Tuple ,__A : Optional[Any] ) -> Optional[Any]:
_lowercase = self.num_labels
_lowercase = TFLayoutLMvaForTokenClassification(config=__A )
_lowercase = model(
__A ,bbox=__A ,pixel_values=__A ,attention_mask=__A ,token_type_ids=__A ,labels=__A ,training=__A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self : int ,__A : Optional[Any] ,__A : Union[str, Any] ,__A : int ,__A : List[Any] ,__A : str ,__A : Optional[Any] ,__A : Any ) -> Optional[Any]:
_lowercase = 2
_lowercase = TFLayoutLMvaForQuestionAnswering(config=__A )
_lowercase = model(
__A ,bbox=__A ,pixel_values=__A ,attention_mask=__A ,token_type_ids=__A ,start_positions=__A ,end_positions=__A ,training=__A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : int ) -> str:
_lowercase = self.prepare_config_and_inputs()
((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) = config_and_inputs
_lowercase = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Dict = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def __UpperCAmelCase ( self : List[Any] ,__A : str ,__A : Any ,__A : Optional[int] ,__A : Tuple ,__A : str ) -> Any:
return True
def __UpperCAmelCase ( self : Dict ,__A : Union[str, Any] ,__A : Tuple ,__A : Optional[Any]=False ) -> dict:
_lowercase = copy.deepcopy(__A )
if model_class in get_values(__A ):
_lowercase = {
k: tf.tile(tf.expand_dims(__A ,1 ) ,(1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__A ,tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
_lowercase = tf.ones(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(__A ):
_lowercase = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
_lowercase = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(__A ):
_lowercase = tf.zeros(self.model_tester.batch_size ,dtype=tf.intaa )
elif model_class in get_values(__A ):
_lowercase = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=tf.intaa )
return inputs_dict
def __UpperCAmelCase ( self : int ) -> Tuple:
_lowercase = TFLayoutLMvaModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,hidden_size=37 )
def __UpperCAmelCase ( self : List[str] ) -> Any:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any ) -> List[str]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
if getattr(__A ,'hf_compute_loss' ,__A ):
# The number of elements in the loss should be the same as the number of elements in the label
_lowercase = self._prepare_for_class(inputs_dict.copy() ,__A ,return_labels=__A )
_lowercase = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() ,reverse=__A )[0]
]
_lowercase = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_lowercase = self._prepare_for_class(inputs_dict.copy() ,__A ,return_labels=__A )
_lowercase = prepared_for_class.pop('input_ids' )
_lowercase = model(__A ,**__A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_lowercase = self._prepare_for_class(inputs_dict.copy() ,__A ,return_labels=__A )
_lowercase = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
_lowercase = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_lowercase = -100
_lowercase = tf.convert_to_tensor(__A )
_lowercase = model(__A ,**__A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_lowercase = self._prepare_for_class(inputs_dict.copy() ,__A ,return_labels=__A )
_lowercase = model(__A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_lowercase = self._prepare_for_class(inputs_dict.copy() ,__A ,return_labels=__A )
# Get keys that were added with the _prepare_for_class function
_lowercase = prepared_for_class.keys() - inputs_dict.keys()
_lowercase = inspect.signature(model.call ).parameters
_lowercase = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_lowercase = {0: 'input_ids'}
for label_key in label_keys:
_lowercase = signature_names.index(__A )
_lowercase = label_key
_lowercase = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_lowercase = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_lowercase = prepared_for_class[value]
_lowercase = tuple(__A )
# Send to model
_lowercase = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__A ,__A ,__A ,__A ,__A ,__A )
def __UpperCAmelCase ( self : Tuple ) -> int:
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase = type
self.model_tester.create_and_check_model(__A ,__A ,__A ,__A ,__A ,__A )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
__A ,__A ,__A ,__A ,__A ,__A ,__A )
def __UpperCAmelCase ( self : Dict ) -> Dict:
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
__A ,__A ,__A ,__A ,__A ,__A ,__A )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
__A ,__A ,__A ,__A ,__A ,__A ,__A )
@slow
def __UpperCAmelCase ( self : Dict ) -> Dict:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = TFLayoutLMvaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class A_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
return LayoutLMvaImageProcessor(apply_ocr=__A ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : int ) -> Any:
_lowercase = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
_lowercase = self.default_image_processor
_lowercase = prepare_img()
_lowercase = image_processor(images=__A ,return_tensors='tf' ).pixel_values
_lowercase = tf.constant([[1, 2]] )
_lowercase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) ,axis=0 )
# forward pass
_lowercase = model(input_ids=__A ,bbox=__A ,pixel_values=__A ,training=__A )
# verify the logits
_lowercase = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape ,__A )
_lowercase = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,__A ,atol=1e-4 ) ) | 67 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> list:
_lowercase = [0] * len(snake_case__ )
for i in range(1 , len(snake_case__ ) ):
# use last results for better performance - dynamic programming
_lowercase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_lowercase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_lowercase = j
return prefix_result
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 67 | 1 |
snake_case = {str(digit): digit**5 for digit in range(1_0)}
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(snake_case__ ) )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(snake_case__ ) )
if __name__ == "__main__":
print(solution()) | 67 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Union[str, Any]:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase = s - 2 * j
break
return diff | 67 | 1 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Union[str, Any]:
_lowercase = len(snake_case__ )
_lowercase = sum(snake_case__ )
_lowercase = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_lowercase = True
for i in range(1 , s + 1 ):
_lowercase = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_lowercase = dp[i][j - 1]
if arr[i - 1] <= j:
_lowercase = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_lowercase = s - 2 * j
break
return diff | 67 |
from manim import *
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
_lowercase = Rectangle(height=0.5 ,width=0.5 )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
_lowercase = Rectangle(height=0.25 ,width=0.25 )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('CPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(4 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('GPU' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
gpu.move_to([-1, -1, 0] )
self.add(__A )
_lowercase = [mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = Text('Model' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
model.move_to([3, -1.0, 0] )
self.add(__A )
_lowercase = []
_lowercase = []
for i, rect in enumerate(__A ):
_lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
target.move_to(__A )
model_arr.append(__A )
_lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__A )
self.add(*__A ,*__A )
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = [meta_mem.copy() for i in range(6 )]
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
_lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
_lowercase = Text('Disk' ,font_size=24 )
_lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
disk.move_to([-4, -1.25, 0] )
self.add(__A ,__A )
_lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__A ,__A )
_lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__A )
_lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ) )
_lowercase = Square(0.3 )
input.set_fill(__A ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__A ,buff=0.5 )
self.play(Write(__A ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
self.play(MoveToTarget(__A ) )
self.play(FadeOut(__A ) )
_lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) )
_lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_lowercase = AnimationGroup(
FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowercase = 0.7
self.play(
Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowercase = a_c
_lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
_lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
self.wait() | 67 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 67 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class A_ :
    """Model tester for TimmBackbone: builds configs and dummy image inputs.

    Restored from the obfuscated original, which had two hard defects:
    duplicate ``__A`` parameter names (a SyntaxError) and every binding
    collapsed onto a throwaway ``_lowercase`` local, leaving later
    references (``config``, ``pixel_values``, ``self.batch_size``, ...)
    unresolved.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        # `parent` is the unittest.TestCase that drives the assertions.
        self.parent = parent
        # Default to the last stage when no explicit indices are requested.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a ``(config, pixel_values)`` pair with random image data."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a ``TimmBackboneConfig`` from this tester's settings."""
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run one forward pass and verify the final feature-map shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # 14x14 is the spatial size the default resnet50 backbone produces
        # for the 32px test images at the default (last-stage) out index.
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` as expected by the common test mixins."""
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Backward-compatible alias: the test class below instantiates the tester
# under this conventional name.
TimmBackboneModelTester = A_
@require_torch
@require_timm
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : str ) -> Optional[int]:
_lowercase = TimmBackboneModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : int ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
_lowercase = 'resnet18'
_lowercase = 'microsoft/resnet-18'
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A )
_lowercase = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
_lowercase = AutoBackbone.from_pretrained(__A ,use_timm_backbone=__A ,out_indices=[1, 2, 3] )
_lowercase = AutoBackbone.from_pretrained(__A ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
    # The following common-mixin tests do not apply to TimmBackbone; each is
    # skipped with the reason given in its decorator (no chunking support, no
    # transformers-side weight init/tying, timm-only checkpoints, etc.).
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        pass
    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Any:
        pass
    @unittest.skip('TimmBackbone initialization is managed on the timm side' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        pass
    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
    def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
        pass
    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        pass
    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        pass
    @unittest.skip('model weights aren\'t tied in TimmBackbone.' )
    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : int ) -> Any:
        pass
    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
    def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        pass
    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
    def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
        pass
    @unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
    def __UpperCAmelCase ( self : Any ) -> Any:
        pass
    @unittest.skip('Safetensors is not supported by timm.' )
    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def __UpperCAmelCase ( self : int ) -> Optional[Any]:
        pass
def __UpperCAmelCase ( self : Dict ) -> int:
    """Check that every model's forward signature starts with `pixel_values`."""
    # NOTE(review): identifiers here are mangled — both tuple targets are
    # `_lowercase` (the config is lost) and `__A`, `model`, `signature`,
    # `arg_names` are unbound in this scope; compare with the upstream
    # test_forward_signature before relying on this.
    _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        _lowercase = model_class(__A )
        _lowercase = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        _lowercase = [*signature.parameters.keys()]
        _lowercase = ['pixel_values']
        self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
    """Backward through one model and check gradients reach hidden states
    (and attentions when the model exposes them)."""
    # NOTE(review): locals are mangled (`_lowercase`, unbound `__A`, `model`,
    # `outputs`, `output`); structure mirrors the common retain_grad test.
    _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
    _lowercase = True
    _lowercase = self.has_attentions
    # no need to test all models as different heads yield the same functionality
    _lowercase = self.all_model_classes[0]
    _lowercase = model_class(__A )
    model.to(__A )
    _lowercase = self._prepare_for_class(__A ,__A )
    _lowercase = model(**__A )
    _lowercase = outputs[0][-1]
    # Encoder-/Decoder-only models
    _lowercase = outputs.hidden_states[0]
    hidden_states.retain_grad()
    if self.has_attentions:
        _lowercase = outputs.attentions[0]
        attentions.retain_grad()
    output.flatten()[0].backward(retain_graph=__A )
    self.assertIsNotNone(hidden_states.grad )
    if self.has_attentions:
        self.assertIsNotNone(attentions.grad )
def __UpperCAmelCase ( self : List[str] ) -> int:
    """Check backbone outputs: feature maps/channels match `out_indices`,
    default to the last stage when unset, and fresh-weight init works."""
    # NOTE(review): locals are mangled (`_lowercase`, unbound `__A`, `result`,
    # `config`); this also appears truncated at the end — verify upstream.
    _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        _lowercase = model_class(__A )
        model.to(__A )
        model.eval()
        _lowercase = model(**__A )
        self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
        self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
        # Check output of last stage is taken if out_features=None, out_indices=None
        _lowercase = copy.deepcopy(__A )
        _lowercase = None
        _lowercase = model_class(__A )
        model.to(__A )
        model.eval()
        _lowercase = model(**__A )
        self.assertEqual(len(result.feature_maps ) ,1 )
        self.assertEqual(len(model.channels ) ,1 )
        # Check backbone can be initialized with fresh weights
        _lowercase = copy.deepcopy(__A )
        _lowercase = False
        _lowercase = model_class(__A )
        model.to(__A )
        model.eval()
        _lowercase = model(**__A )
import os
def SCREAMING_SNAKE_CASE__ ( ) -> int:
    """Project Euler 11: greatest product of four adjacent numbers.

    Reads the 20x20 grid from ``grid.txt`` next to this module and returns
    the maximum product of four adjacent values in any direction (right,
    down, and both diagonals).

    Returns:
        The maximum four-in-a-row product found in the grid.
    """
    # Bug fix: the path was built from the undefined name `snake_case__`;
    # anchor it to this module's directory instead. os.path.join also works
    # when dirname() is empty (module run from its own directory).
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'grid.txt') ) as f:
        grid = [[int(x) for x in f.readline().split()] for _ in range(20 )]
    maximum = 0
    # right: j + 3 must stay inside the 20 columns, hence range(17)
    for i in range(20 ):
        for j in range(17 ):
            maximum = max(maximum, grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] )
    # down: i + 3 must stay inside the 20 rows
    for i in range(17 ):
        for j in range(20 ):
            maximum = max(maximum, grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j] )
    # diagonal down-right
    for i in range(17 ):
        for j in range(17 ):
            maximum = max(maximum, grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] )
    # diagonal down-left: j - 3 must stay >= 0, hence range(3, 20)
    for i in range(17 ):
        for j in range(3 , 20 ):
            maximum = max(maximum, grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3] )
    return maximum
if __name__ == "__main__":
print(solution()) | 67 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table: symbol names exposed per submodule, consumed by
# _LazyModule below. (Bug fix: this dict and the modeling list were both
# bound to a throwaway name, leaving `_import_structure` undefined.)
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: expose the modeling symbols as well.
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Bug fix: `NllbMoeTopaRouter` was a typo for the exported
        # `NllbMoeTop2Router`.
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    # Bug fix: the lazy proxy must replace this module in sys.modules; it was
    # previously assigned to a throwaway module-level name, so lazy loading
    # never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
    """Builds tiny ViT configs/inputs and runs per-head model checks.

    NOTE(review): identifiers in this file are mangled — every parameter is
    `__A` and every assignment target is `_lowercase` — so several names read
    below (parent, batch_size, config, model, result, ...) are unbound as
    written; compare with the upstream ViTModelTester before relying on it.
    """

    def __init__( self : List[Any] ,__A : Dict ,__A : Union[str, Any]=13 ,__A : str=30 ,__A : Any=2 ,__A : Dict=3 ,__A : List[str]=True ,__A : Optional[int]=True ,__A : List[Any]=32 ,__A : Optional[int]=5 ,__A : List[Any]=4 ,__A : Optional[Any]=37 ,__A : Optional[Any]="gelu" ,__A : Optional[int]=0.1 ,__A : Tuple=0.1 ,__A : Tuple=10 ,__A : Dict=0.02 ,__A : List[Any]=None ,__A : Optional[Any]=2 ,) -> List[str]:
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = image_size
        _lowercase = patch_size
        _lowercase = num_channels
        _lowercase = is_training
        _lowercase = use_labels
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = intermediate_size
        _lowercase = hidden_act
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = type_sequence_label_size
        _lowercase = initializer_range
        _lowercase = scope
        _lowercase = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        _lowercase = (image_size // patch_size) ** 2
        _lowercase = num_patches + 1

    def __UpperCAmelCase ( self : int ) -> int:
        # Prepare (config, pixel_values, labels) for the checks below.
        _lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowercase = None
        if self.use_labels:
            _lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        _lowercase = self.get_config()
        return config, pixel_values, labels

    def __UpperCAmelCase ( self : Tuple ) -> str:
        # Build a small ViTConfig from the tester's hyperparameters.
        return ViTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__A ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)

    def __UpperCAmelCase ( self : str ,__A : Union[str, Any] ,__A : Optional[int] ,__A : List[Any] ) -> Union[str, Any]:
        # Base model: last_hidden_state must be (batch, seq_len, hidden).
        _lowercase = ViTModel(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(__A )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCAmelCase ( self : List[Any] ,__A : str ,__A : List[Any] ,__A : int ) -> Union[str, Any]:
        # Masked-image-modeling head: reconstruction shape must match input.
        _lowercase = ViTForMaskedImageModeling(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(__A )
        self.parent.assertEqual(
            result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        _lowercase = 1
        _lowercase = ViTForMaskedImageModeling(__A )
        model.to(__A )
        model.eval()
        _lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _lowercase = model(__A )
        self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )

    def __UpperCAmelCase ( self : str ,__A : Union[str, Any] ,__A : Dict ,__A : List[Any] ) -> int:
        # Classification head: logits must be (batch, num_labels).
        _lowercase = self.type_sequence_label_size
        _lowercase = ViTForImageClassification(__A )
        model.to(__A )
        model.eval()
        _lowercase = model(__A ,labels=__A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        _lowercase = 1
        _lowercase = ViTForImageClassification(__A )
        model.to(__A )
        model.eval()
        _lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _lowercase = model(__A )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    def __UpperCAmelCase ( self : Optional[Any] ) -> str:
        # Repackage prepare_config_and_inputs() output for the common tests.
        _lowercase = self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) = config_and_inputs
        _lowercase = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Common/pipeline test-suite wiring for the ViT model family.

    NOTE(review): the class attributes are all mangled to
    `SCREAMING_SNAKE_CASE_` (each assignment shadows the previous), and
    `__A`/`_lowercase` leave several names unbound in method bodies; compare
    with the upstream ViTModelTest before relying on this suite.
    """

    SCREAMING_SNAKE_CASE_ : Optional[Any] = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE_ : str = (
        {'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Optional[int] = True
    SCREAMING_SNAKE_CASE_ : int = False
    SCREAMING_SNAKE_CASE_ : Dict = False
    SCREAMING_SNAKE_CASE_ : Optional[int] = False

    def __UpperCAmelCase ( self : Any ) -> Tuple:
        # setUp: build the model tester and the config tester.
        _lowercase = ViTModelTester(self )
        _lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A ,hidden_size=37 )

    def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def __UpperCAmelCase ( self : int ) -> List[Any]:
        pass

    def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
        # Input embeddings must be an nn.Module; output embeddings, if any,
        # must be a Linear layer.
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(__A )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            _lowercase = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__A ,nn.Linear ) )

    def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
        # Forward signature must start with `pixel_values`.
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase = model_class(__A )
            _lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowercase = [*signature.parameters.keys()]
            _lowercase = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,__A )

    def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__A )

    def __UpperCAmelCase ( self : Dict ) -> Any:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__A )

    def __UpperCAmelCase ( self : str ) -> Any:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__A )

    @slow
    def __UpperCAmelCase ( self : List[str] ) -> Any:
        # Smoke-test loading the first released checkpoint.
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase = ViTModel.from_pretrained(__A )
            self.assertIsNotNone(__A )
def SCREAMING_SNAKE_CASE__ ( ):
    """Load the COCO cats test-fixture image used by the integration tests.

    Returns:
        The PIL image loaded from the test fixtures directory.
    """
    # Bug fix: the opened image was bound to `_lowercase` while the function
    # returned the unbound name `image`. (The `-> Dict` annotation was also
    # unresolvable — no `Dict` import — and wrong, so it is dropped.)
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Slow ViT integration tests against released checkpoints.

    NOTE(review): `__A` is unbound in every method (upstream these calls use
    `torch_device` / the processed inputs), and `torch.floataa` looks like a
    mangled `torch.float16`; confirm against the upstream test before running.
    """

    @cached_property
    def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
        # Default processor, or None when vision deps are unavailable.
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def __UpperCAmelCase ( self : int ) -> Tuple:
        # ImageNet classification head: check logits shape and first values.
        _lowercase = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(__A )
        _lowercase = self.default_image_processor
        _lowercase = prepare_img()
        _lowercase = image_processor(images=__A ,return_tensors='pt' ).to(__A )
        # forward pass
        with torch.no_grad():
            _lowercase = model(**__A )
        # verify the logits
        _lowercase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,__A )
        _lowercase = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__A ,atol=1e-4 ) )

    @slow
    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        _lowercase = ViTModel.from_pretrained('facebook/dino-vits8' ).to(__A )
        _lowercase = ViTImageProcessor.from_pretrained('facebook/dino-vits8' ,size=480 )
        _lowercase = prepare_img()
        _lowercase = image_processor(images=__A ,return_tensors='pt' )
        _lowercase = inputs.pixel_values.to(__A )
        # forward pass
        with torch.no_grad():
            _lowercase = model(__A ,interpolate_pos_encoding=__A )
        # verify the logits
        _lowercase = torch.Size((1, 3601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape ,__A )
        _lowercase = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__A )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__A ,atol=1e-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def __UpperCAmelCase ( self : str ) -> Dict:
        # fp16 inference smoke test with accelerate device_map.
        _lowercase = ViTModel.from_pretrained('facebook/dino-vits8' ,torch_dtype=torch.floataa ,device_map='auto' )
        _lowercase = self.default_image_processor
        _lowercase = prepare_img()
        _lowercase = image_processor(images=__A ,return_tensors='pt' )
        _lowercase = inputs.pixel_values.to(__A )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _lowercase = model(__A )
# Project Euler 30: find all numbers equal to the sum of the fifth powers of
# their digits. (Bug fix: the definitions below were mangled — the lookup
# table, helper, and `solution` were all bound to throwaway names while the
# code referenced DIGITS_FIFTH_POWER, digits_fifth_powers_sum and solution,
# raising NameError; the definitions are renamed to match their call sites.)
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers equal to their digit-fifth-power sum.

    The search starts at 1000 (smaller numbers are excluded by the problem)
    and stops below 1,000,000, since 7 * 9**5 < 10**6 means no seven-digit
    number can ever qualify.
    """
    return sum(
        number
        for number in range(1000, 100_0000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
from sklearn.metrics import recall_score
import datasets
snake_case = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
snake_case = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
snake_case = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    """`datasets.Metric` wrapper around `sklearn.metrics.recall_score`."""

    def __UpperCAmelCase ( self : int ) -> str:
        # Declare metric metadata and the input schema: multilabel configs
        # take sequences of ints, every other config takes scalar ints.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32' ) ),
                    'references': datasets.Sequence(datasets.Value('int32' ) ),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32' ),
                    'references': datasets.Value('int32' ),
                } ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'] ,)

    def __UpperCAmelCase ( self : Optional[Any] ,__A : Union[str, Any] ,__A : Tuple ,__A : Tuple=None ,__A : List[str]=1 ,__A : List[str]="binary" ,__A : Optional[int]=None ,__A : str="warn" ,) -> Optional[Any]:
        # Delegates to sklearn's recall_score; the mangled `__A` parameters
        # positionally mirror (predictions, references, labels, pos_label,
        # average, sample_weight, zero_division).
        # NOTE(review): `score` below is unbound — the result is assigned to
        # `_lowercase`; compare with the upstream metric before relying on it.
        _lowercase = recall_score(
            __A ,__A ,labels=__A ,pos_label=__A ,average=__A ,sample_weight=__A ,zero_division=__A ,)
        return {"recall": float(__A ) if score.size == 1 else score}
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime.

    Handles 2 and 3 directly, rejects numbers < 2 and multiples of 2 or 3,
    then trial-divides by 6k +/- 1 candidates up to sqrt(number).

    Bug fix: the definition was mangled to a throwaway name while every
    caller in this module uses ``is_prime``, raising NameError.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return ``n`` together with all of its left- and right-truncations.

    E.g. 3797 -> [3797, 797, 97, 7, 379, 37, 3].

    Bug fix: the definition was mangled to a throwaway name while its caller
    uses ``list_truncated_nums``, raising NameError.
    """
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # drop i leading digits
        list_nums.append(int(str_num[:-i]))  # drop i trailing digits
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter for truncatable-prime candidates.

    For numbers longer than three digits, the leading and trailing 3-digit
    truncations must themselves be prime; short numbers always pass.

    Bug fix: the definition was mangled to a throwaway name while its caller
    uses ``validate``, raising NameError.
    """
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first ``count`` primes that stay prime under every
    left- and right-truncation.

    Scans odd candidates starting from 13 (single-digit primes and 11 are
    excluded by the problem statement).

    Bug fix: the definition was mangled to a throwaway name while
    ``solution`` and the __main__ guard call ``compute_truncated_primes``,
    raising NameError.
    """
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Project Euler 37: sum of the only eleven truncatable primes.

    Bug fix: this definition was mangled to the same throwaway name as the
    helpers above (shadowing ``compute_truncated_primes``); it is restored
    to ``solution``.
    """
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(F"""{sum(compute_truncated_primes(1_1)) = }""")
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Slow integration test for the DiT document-image classifier (RVL-CDIP).

    NOTE(review): `__A` is unbound in the method body (upstream these calls
    use `torch_device` / the processed inputs) and the expected-logit slice
    length differs from the `[0, :3]` comparison target shape conventions;
    compare with the upstream DiT test before running.
    """

    @slow
    def __UpperCAmelCase ( self : Optional[Any] ) -> str:
        _lowercase = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        _lowercase = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(__A )
        from datasets import load_dataset
        _lowercase = load_dataset('nielsr/rvlcdip-demo' )
        _lowercase = dataset['train'][0]['image'].convert('RGB' )
        _lowercase = image_processor(__A ,return_tensors='pt' ).to(__A )
        # forward pass
        with torch.no_grad():
            _lowercase = model(**__A )
        _lowercase = outputs.logits
        # RVL-CDIP has 16 document classes.
        _lowercase = torch.Size((1, 16) )
        self.assertEqual(logits.shape ,__A )
        _lowercase = torch.tensor(
            [-0.4158, -0.4092, -0.4347] ,device=__A ,dtype=torch.float ,)
        self.assertTrue(torch.allclose(logits[0, :3] ,__A ,atol=1e-4 ) )
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class A_ :
    """Builds small UMT5 configs/inputs and runs model/decoder/fp16 checks.

    NOTE(review): identifiers are mangled — parameters are `__A` and
    assignment targets `_lowercase` — so many names read below (parent,
    config, model, outputs, ...) are unbound as written; compare with the
    upstream UMT5ModelTester before relying on this class.
    """

    def __init__( self : Optional[Any] ,__A : Tuple ,__A : Any=99 ,__A : Any=13 ,__A : Dict=7 ,__A : List[Any]=9 ,__A : Dict=True ,__A : Any=True ,__A : Tuple=False ,__A : str=32 ,__A : int=5 ,__A : List[str]=4 ,__A : Optional[Any]=37 ,__A : int=8 ,__A : Any=0.1 ,__A : Dict=0.002 ,__A : Union[str, Any]=1 ,__A : Optional[Any]=0 ,__A : int=0 ,__A : Tuple=None ,__A : str=None ,) -> List[Any]:
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = encoder_seq_length
        _lowercase = decoder_seq_length
        # For common tests
        _lowercase = self.decoder_seq_length
        _lowercase = is_training
        _lowercase = use_attention_mask
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = d_ff
        _lowercase = relative_attention_num_buckets
        _lowercase = dropout_rate
        _lowercase = initializer_factor
        _lowercase = eos_token_id
        _lowercase = pad_token_id
        _lowercase = decoder_start_token_id
        _lowercase = None
        _lowercase = decoder_layers

    def __UpperCAmelCase ( self : Dict ) -> Dict:
        # Pipeline config: reuse the released google/umt5-base configuration.
        return TaConfig.from_pretrained('google/umt5-base' )

    def __UpperCAmelCase ( self : Optional[int] ,__A : Optional[int] ,__A : int ,__A : str ,__A : List[str]=None ,__A : List[str]=None ,__A : Any=None ,__A : List[Any]=None ,__A : str=None ,) -> Tuple:
        # Fill in default attention and head masks for any that were omitted.
        if attention_mask is None:
            _lowercase = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            _lowercase = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            _lowercase = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__A )
        if decoder_head_mask is None:
            _lowercase = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        if cross_attn_head_mask is None:
            _lowercase = torch.ones(
                config.num_decoder_layers ,config.num_attention_heads ,device=__A )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        _lowercase = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size )
        _lowercase = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        _lowercase = input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = decoder_input_ids.clamp(self.pad_token_id + 1 )
        _lowercase = self.get_config()
        _lowercase = config.num_attention_heads
        _lowercase = self.prepare_inputs_dict(__A ,__A ,__A )
        return config, input_dict

    def __UpperCAmelCase ( self : Dict ) -> str:
        _lowercase , _lowercase = self.prepare_config_and_inputs()
        return config, inputs_dict

    def __UpperCAmelCase ( self : Dict ) -> Tuple:
        # Fixed-vocab config variant used by the pipeline tests.
        return TaConfig(
            vocab_size=166 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)

    def __UpperCAmelCase ( self : Dict ) -> Any:
        # Config built from this tester's own hyperparameters.
        return TaConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,)

    def __UpperCAmelCase ( self : Union[str, Any] ,__A : List[str] ,__A : Dict ,__A : List[str] ,__A : List[Any] ,__A : Tuple ,__A : int ,) -> Tuple:
        # Full encoder-decoder forward: check output shapes and cache layout.
        _lowercase = UMTaModel(config=__A )
        model.to(__A )
        model.eval()
        _lowercase = model(
            input_ids=__A ,decoder_input_ids=__A ,attention_mask=__A ,decoder_attention_mask=__A ,)
        _lowercase = model(input_ids=__A ,decoder_input_ids=__A )
        _lowercase = result.last_hidden_state
        _lowercase = result.past_key_values
        _lowercase = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(__A ) ,config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) ,4 )

    def __UpperCAmelCase ( self : List[Any] ,__A : Tuple ,__A : int ,__A : Any ,__A : Tuple ,__A : Any ,__A : Optional[int] ,) -> List[str]:
        # Decoder-only KV-cache test: with-cache and without-cache forwards
        # must agree on a random slice of the last position.
        _lowercase = UMTaModel(config=__A ).get_decoder().to(__A ).eval()
        # first forward pass
        _lowercase = model(__A ,use_cache=__A )
        _lowercase = model(__A )
        _lowercase = model(__A ,use_cache=__A )
        self.parent.assertTrue(len(__A ) == len(__A ) )
        self.parent.assertTrue(len(__A ) == len(__A ) + 1 )
        _lowercase , _lowercase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _lowercase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
        # append to next input_ids and
        _lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 )
        _lowercase = model(__A )['last_hidden_state']
        _lowercase = model(__A ,past_key_values=__A )['last_hidden_state']
        # select random slice
        _lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        _lowercase = output_from_no_past[:, -1, random_slice_idx].detach()
        _lowercase = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__A ,__A ,atol=1e-3 ) )

    def __UpperCAmelCase ( self : Any ,__A : List[str] ,__A : List[str] ,) -> int:
        # fp16 forward must not produce NaNs.
        _lowercase = UMTaModel(config=__A ).to(__A ).half().eval()
        _lowercase = model(**__A )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(__A ).any().item() )
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Common/generation/pipeline test-suite wiring for the UMT5 models.

    NOTE(review): the class attributes are all mangled to
    `SCREAMING_SNAKE_CASE_` (each assignment shadows the previous) and
    `__A`/`head_masking`/`out` are unbound in method bodies; compare with the
    upstream UMT5ModelTest before relying on this suite.
    """

    SCREAMING_SNAKE_CASE_ : List[str] = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    SCREAMING_SNAKE_CASE_ : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE_ : str = (
        {
            '''conversational''': UMTaForConditionalGeneration,
            '''feature-extraction''': UMTaModel,
            '''summarization''': UMTaForConditionalGeneration,
            '''text2text-generation''': UMTaForConditionalGeneration,
            '''translation''': UMTaForConditionalGeneration,
            '''question-answering''': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
    SCREAMING_SNAKE_CASE_ : int = False
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
    SCREAMING_SNAKE_CASE_ : List[Any] = True
    SCREAMING_SNAKE_CASE_ : int = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    SCREAMING_SNAKE_CASE_ : Dict = [0.8, 0.9]

    def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
        # setUp: build the shared model tester.
        _lowercase = UMTaModelTester(self )

    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def __UpperCAmelCase ( self : int ) -> str:
        # ONNX export smoke test (currently skipped, see decorator).
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = UMTaModel(config_and_inputs[0] ).to(__A )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                __A ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__A ,opset_version=9 ,input_names=['input_ids', 'decoder_input_ids'] ,)

    @unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
    def __UpperCAmelCase ( self : List[Any] ) -> str:
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*__A )

    def __UpperCAmelCase ( self : List[str] ) -> int:
        # Zeroed head masks must zero out the corresponding attention weights
        # during generation.
        _lowercase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        _lowercase = self.model_tester.prepare_config_and_inputs()
        _lowercase = config_and_inputs[0]
        _lowercase = UMTaForConditionalGeneration(__A ).eval()
        model.to(__A )
        _lowercase = {
            'head_mask': torch.zeros(config.num_layers ,config.num_heads ,device=__A ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__A ),
        }
        for attn_name, (name, mask) in zip(__A ,head_masking.items() ):
            _lowercase = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                _lowercase = torch.ones(
                    config.num_decoder_layers ,config.num_heads ,device=__A )
            _lowercase = model.generate(
                config_and_inputs[1]['input_ids'] ,num_beams=1 ,max_length=3 ,output_attentions=__A ,return_dict_in_generate=__A ,**__A ,)
            # We check the state of decoder_attentions and cross_attentions just from the last step
            _lowercase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )

    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def __UpperCAmelCase ( self : str ) -> List[Any]:
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
    """Slow integration test: tokenization + generation with google/umt5-small.

    NOTE(review): as in the rest of this file, local names were mangled to
    ``_lowercase``; references such as ``input_ids`` below are unbound as
    written — verify against the upstream transformers integration test.
    """
    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def __UpperCAmelCase ( self : int ) -> List[str]:
        # Load the pretrained checkpoint and the slow (sentencepiece) tokenizer.
        _lowercase = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' ,return_dict=__A ).to(__A )
        _lowercase = AutoTokenizer.from_pretrained('google/umt5-small' ,use_fast=__A ,legacy=__A )
        # Multilingual prompts containing T5-style sentinel tokens.
        _lowercase = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        _lowercase = tokenizer(__A ,return_tensors='pt' ,padding=__A ).input_ids
        # fmt: off
        # Reference token ids expected from the original (non-stripping) tokenizer.
        _lowercase = torch.tensor(
            [
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(__A ,__A )
        _lowercase = model.generate(input_ids.to(__A ) )
        # Expected decoded generations for each prompt above.
        _lowercase = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        _lowercase = tokenizer.batch_decode(__A )
        self.assertEqual(__A ,__A )
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Mapping from RWKV checkpoint size tag to the number of hidden layers.
# (Bug fix: both dicts were assigned to the same name `snake_case`, shadowing
# each other, while the conversion code below references them as
# NUM_HIDDEN_LAYERS_MAPPING / HIDEN_SIZE_MAPPING — a NameError at runtime.)
NUM_HIDDEN_LAYERS_MAPPING = {
    """169M""": 1_2,
    """430M""": 2_4,
    """1B5""": 2_4,
    """3B""": 3_2,
    """7B""": 3_2,
    """14B""": 4_0,
}
# Mapping from RWKV checkpoint size tag to the hidden size.
# The original "HIDEN" spelling is kept because the conversion code below
# references this exact identifier.
HIDEN_SIZE_MAPPING = {
    """169M""": 7_6_8,
    """430M""": 1_0_2_4,
    """1B5""": 2_0_4_8,
    """3B""": 2_5_6_0,
    """7B""": 4_0_9_6,
    """14B""": 5_1_2_0,
}
def convert_state_dict(state_dict: dict) -> dict:
    """Rename the keys of an original RWKV checkpoint to HF Transformers naming.

    Mutates `state_dict` in place (pop + re-insert under the new key) and also
    returns it for convenience.

    Bug fix: the dumped version rebound every local to `_lowercase` while the
    loop body read `state_dict_keys`, `name` and `weight`, so it raised
    NameError on the first iteration; the function is also renamed from the
    mangled `SCREAMING_SNAKE_CASE__` to `convert_state_dict`, the name the
    conversion routine below actually calls. The wrong `-> int` annotation is
    corrected to `dict`.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(R'blocks\.(\d+)\.att', R'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(R'blocks\.(\d+)\.ffn', R'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')
        # Every weight except the LM head lives under the `rwkv` submodule in HF.
        if name != "head.weight":
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Convert an original RWKV checkpoint hosted on the Hub into HF format.

    Bug fix: the dumped definition declared all seven parameters as
    `snake_case__` (a duplicate-argument SyntaxError) and rebound every local
    to `_lowercase` while later lines read `tokenizer`, `size`, `config`,
    `shards`, `index`, etc. The signature and locals are restored to the
    names the surviving references and the `__main__` call site use.

    Args:
        repo_id: Hub repo the original checkpoint lives in.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: directory receiving the converted tokenizer/config/weights.
        size: model size key ("169M" ... "14B"); inferred from
            `checkpoint_file` when omitted.
        tokenizer_file: optional tokenizer.json path; defaults to the
            GPT-NeoX-20B tokenizer.
        push_to_hub: if True, push the converted model under `model_name`.
        model_name: Hub id to push to (required when `push_to_hub` is set).

    Raises:
        ValueError: if the size cannot be inferred or is unknown, or if
            `push_to_hub` is set without `model_name`.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 5_0277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    checkpoint_path = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(checkpoint_path, map_location='cpu')
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)
    # 5. Clean up shards: re-save each shard with detached CPU copies, since the
    # files PyTorch saves otherwise take the same space as the whole state_dict.
    print(
        'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.')
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point for the RWKV -> HF conversion script.
    # Bug fix: the dump assigned both the parser and the parsed args to
    # `snake_case`, while the following lines read `parser` and `args`,
    # raising NameError immediately; bind the names that are actually used.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
    )
    parser.add_argument(
        """--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
    )
    parser.add_argument(
        """--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
    )
    parser.add_argument(
        """--tokenizer_file""",
        default=None,
        type=str,
        help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
    )
    parser.add_argument(
        """--size""",
        default=None,
        type=str,
        help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Push to the Hub the converted model.""",
    )
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help="""Name of the pushed model on the Hub, including the username / organization.""",
    )
    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue_model_parallelism.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
    ] )
class A_ ( unittest.TestCase ):
    """SageMaker smdistributed model-parallel training smoke tests.

    Parameterized over the training script; each case trains roberta-large on
    an ml.p3dn.24xlarge instance and checks runtime / accuracy / loss against
    the thresholds in ``results``.

    NOTE(review): method names in this dump were all rewritten to
    ``__UpperCAmelCase`` and locals to ``_lowercase``; calls such as
    ``self.create_estimator`` and references like ``estimator``,
    ``smp_options``, ``instance_count`` or ``job_name`` are unresolved as
    written — restore from the upstream transformers SageMaker test before use.
    """
    def __UpperCAmelCase ( self : Any ) -> Optional[int]:
        # setUp equivalent: copy run_glue.py into the test path for pytorch runs.
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=__A ,)
        assert hasattr(self ,'env' )
    def __UpperCAmelCase ( self : str ,__A : Tuple ) -> int:
        # Build a HuggingFace estimator configured for smdistributed model
        # parallelism: 8 processes per host, 4 partitions, interleaved pipeline.
        # configuration for running training on smdistributed Model Parallel
        _lowercase = {
            'enabled': True,
            'processes_per_host': 8,
        }
        _lowercase = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        _lowercase = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        _lowercase = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=__A ,instance_type=self.instance_type ,debugger_hook_config=__A ,hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            } ,metric_definitions=self.env.metric_definitions ,distribution=__A ,py_version='py36' ,)
    def __UpperCAmelCase ( self : List[Any] ,__A : Any ) -> Optional[Any]:
        # Export a finished training job's metrics to CSV next to the test files.
        TrainingJobAnalytics(__A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(1,)] )
    def __UpperCAmelCase ( self : Tuple ,__A : Union[str, Any] ) -> Optional[Any]:
        # End-to-end run: train, pull metrics, assert KPI thresholds, dump JSON.
        # create estimator
        _lowercase = self.create_estimator(__A )
        # run training
        estimator.fit()
        # result dataframe
        _lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        _lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _lowercase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__A )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.