| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` tokens plus one before and one after
        self.key_length = self.attention_window + 2
        # because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
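        # Worked example with the defaults above (an illustrative note, not in the original test):
        # seq_length=7, attention_window=4 -> 7 + (4 - 7 % 4) % 4 = 7 + 1 = 8,
        # i.e. the encoder sequence is padded up to the next multiple of the attention window.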
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
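# Illustrative note (not part of the original test file): for a batch of input_ids padded with
# config.pad_token_id, the derived `attention_mask` is an int8 tensor of the same shape with
# zeros exactly at the padding positions, while the decoder mask always keeps the first
# (decoder start) token visible.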
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 95 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 19 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
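# e.g. get_config("resnetv2_50x1_bitm") returns a BitConfig with conv_layer="std_conv"
# and the 1000 ImageNet-1k labels attached (illustrative note, not in the original script).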
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "head.fc" in name:
        name = name.replace('head.fc', 'classifier.1')
    if name.startswith('norm'):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
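# e.g. rename_key("stem.conv.weight") -> "bit.embedder.convolution.weight", and
# rename_key("blocks.0.conv1.weight") -> "bit.encoder.layers.0.conv1.weight"
# (illustrative note, not in the original script).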
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={'shortest_edge': timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print('Logits:', logits[0, :3])
    print('Predicted class:', model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing model {model_name} and processor to the hub')
        model.push_to_hub(f'ybelkada/{model_name}')
        processor.push_to_hub(f'ybelkada/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 359 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
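# Quick sanity check (illustrative, not in the original file): the origin (0, 0) never diverges,
# so get_distance(0, 0, 50) == 49 / 49 == 1.0, while a clearly divergent point such as (5, 5)
# breaks out on the first iteration and returns 0 / 49 == 0.0.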
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
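# e.g. colorsys.hsv_to_rgb(0.5, 1, 1) == (0.0, 1.0, 1.0), so get_color_coded_rgb(0.5)
# yields the cyan pixel (0, 255, 255) (illustrative note, not in the original file).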
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 104 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 89 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("""name_or_path""")
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ you are testing the model, this can safely be ignored"""
            )
            name_or_path = """None"""

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = """<|endoftext|>""" if eos_token is None else eos_token
        unk_token = """<unk>""" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = """<pad>""" if pad_token is None else pad_token
            bos_token = """<s>""" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F"""[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"""
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("""""", text)
        # Normalize whitespaces
        text = """""".join([char if char not in self.whitespaces else """ """ for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("""NFC""", text)
        return text
    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)
    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids
    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(all_responses) + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt)
| 37 | 0 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.08497,
            'b': 0.01492,
            'c': 0.02202,
            'd': 0.04253,
            'e': 0.11162,
            'f': 0.02228,
            'g': 0.02015,
            'h': 0.06094,
            'i': 0.07546,
            'j': 0.00153,
            'k': 0.01292,
            'l': 0.04025,
            'm': 0.02406,
            'n': 0.06749,
            'o': 0.07507,
            'p': 0.01929,
            'q': 0.00095,
            'r': 0.07587,
            's': 0.06327,
            't': 0.09356,
            'u': 0.02758,
            'v': 0.00978,
            'w': 0.02560,
            'x': 0.00150,
            'y': 0.01994,
            'z': 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected number of occurrences based on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the number of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected number of occurrences based on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
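# A short usage sketch (hypothetical ciphertext, added for illustration):
if __name__ == "__main__":
    shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared("ifmmp xpsme")
    print(f"shift={shift}, chi_squared={chi_squared:.3f}, plaintext={plaintext!r}")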
| 358 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
_lowercase : Any = TOKENIZER_CLASSES
else:
_lowercase : Tuple = {tokenizer_name: getattr(lowerCamelCase_ , tokenizer_name + 'Fast' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
_lowercase : Union[str, Any] = TOKENIZER_CLASSES[tokenizer_name]
_lowercase : Any = True
if checkpoint_name is None:
_lowercase : int = list(tokenizer_class.max_model_input_sizes.keys() )
else:
_lowercase : List[Any] = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
_lowercase : Union[str, Any] = tokenizer_class.from_pretrained(lowerCamelCase_ , force_download=lowerCamelCase_ )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
_lowercase , _lowercase : str = checkpoint.split('/' )
_lowercase : Any = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
elif add_prefix:
_lowercase : Union[str, Any] = checkpoint
_lowercase : List[str] = dump_path
else:
_lowercase : str = None
_lowercase : Any = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_lowercase : Tuple = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_lowercase : List[Any] = file_path.split(lowerCamelCase_ )[-1][0]
if next_char == "/":
_lowercase : Any = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
_lowercase : Optional[Any] = tokenizer.save_pretrained(
lowerCamelCase_ , legacy_format=lowerCamelCase_ , filename_prefix=lowerCamelCase_ )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowerCamelCase_ )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
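# Example invocation (hypothetical paths and tokenizer choice, shown for illustration only):
#   python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name Bert --dump_path ./fast-tokenizers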
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_yolos'''] = [
        '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''YolosForObjectDetection''',
        '''YolosModel''',
        '''YolosPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
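# With the lazy module installed above, `from transformers.models.yolos import YolosModel`
# only triggers the real import of `modeling_yolos` (and thus torch) on first attribute access
# (illustrative note, not part of the original __init__.py).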
| 49 |
"""simple docstring"""
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
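# A short usage sketch (added for illustration, not part of the original file):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")  # fill the fixed-size buffer
    print(queue.dequeue())  # -> "a" (FIFO order)
    queue.enqueue("d")  # the rear wraps around into the freed slot
    print(len(queue))  # -> 3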
 | 74 | 0 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='''longest''',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='''pt''',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''',
        type=str,
        default=None,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''],
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''',
    )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
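# Typical launch commands (hypothetical filename, for illustration):
#   python memory_example.py --mixed_precision fp16
#   accelerate launch memory_example.py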
| 287 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ['''a''', '''b''', '''c''']

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ['''c'''])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['''a''', '''c'''], None, stage_names)
        self.assertEqual(out_features, ['''a''', '''c'''])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ['''a''', '''c'''])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ['''a''', '''c'''])
        self.assertEqual(out_indices, [-3, -1])
    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''a''', '''b'''], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(('''a''', '''b'''), (0, 1), ['''a''', '''b'''])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''a''', '''b'''], (0, 1), ['''a'''])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ['''a''', '''b'''])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ['''a'''])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''a''', '''b'''], (0,), ['''a''', '''b''', '''c'''])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''a''', '''b'''], (0, 2), ['''a''', '''b''', '''c'''])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''b''', '''a'''], (0, 1), ['''a''', '''b'''])

        # Check passes with valid inputs
        verify_out_features_out_indices(['''a''', '''b''', '''d'''], (0, 1, -1), ['''a''', '''b''', '''c''', '''d'''])
    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ['''a''', '''b''', '''c''']
        backbone._out_features = ['''a''', '''c''']
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ['''a''', '''c'''])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ['''a''', '''b''']
        self.assertEqual(backbone.out_features, ['''a''', '''b'''])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ['''a''', '''c'''])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 287 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE : int = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE : Optional[int] = num_samples * [prompt]
SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(a )
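# replicate the params across devices and shard the prompt ids before the pmapped call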
SCREAMING_SNAKE_CASE : Tuple = replicate(a )
SCREAMING_SNAKE_CASE : Dict = shard(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : Dict = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE : str = sd_pipe(a , a , a , num_inference_steps=25 , jit=a )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE : List[str] = images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE : Any = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = "stabilityai/stable-diffusion-2"
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(a , subfolder="scheduler" )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = FlaxStableDiffusionPipeline.from_pretrained(
a , scheduler=a , revision="bf16" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE : Tuple = scheduler_params
SCREAMING_SNAKE_CASE : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE : Optional[Any] = jax.device_count()
SCREAMING_SNAKE_CASE : int = num_samples * [prompt]
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.prepare_inputs(a )
SCREAMING_SNAKE_CASE : Tuple = replicate(a )
SCREAMING_SNAKE_CASE : Optional[int] = shard(a )
SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE : str = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE : str = sd_pipe(a , a , a , num_inference_steps=25 , jit=a )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 76 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=3 , A=30 , A=4_00 , A=True , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , A=True , A=1 / 2_55 , A=True , ) -> str:
'''simple docstring'''
lowerCamelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = num_channels
lowerCamelCase = min_resolution
lowerCamelCase = max_resolution
lowerCamelCase = do_resize
lowerCamelCase = size
lowerCamelCase = do_normalize
lowerCamelCase = image_mean
lowerCamelCase = image_std
lowerCamelCase = do_rescale
lowerCamelCase = rescale_factor
lowerCamelCase = do_pad
def __A ( self ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __A ( self , A , A=False ) -> List[Any]:
'''simple docstring'''
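# compute the expected (height, width) after shortest-edge resizing; for batches, take the max over images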
if not batched:
lowerCamelCase = image_inputs[0]
if isinstance(A , Image.Image ):
lowerCamelCase , lowerCamelCase = image.size
else:
lowerCamelCase , lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase = int(self.size["""shortest_edge"""] * h / w )
lowerCamelCase = self.size["""shortest_edge"""]
elif w > h:
lowerCamelCase = self.size["""shortest_edge"""]
lowerCamelCase = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCamelCase = self.size["""shortest_edge"""]
lowerCamelCase = self.size["""shortest_edge"""]
else:
lowerCamelCase = []
for image in image_inputs:
lowerCamelCase , lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase = max(A , key=lambda A : item[0] )[0]
lowerCamelCase = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = YolosImageProcessor if is_vision_available() else None
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = YolosImageProcessingTester(self )
@property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , """image_mean""" ) )
self.assertTrue(hasattr(A , """image_std""" ) )
self.assertTrue(hasattr(A , """do_normalize""" ) )
self.assertTrue(hasattr(A , """do_resize""" ) )
self.assertTrue(hasattr(A , """size""" ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , A )
lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase = image_processing(A , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase = image_processing(A , return_tensors="""pt""" ).pixel_values
lowerCamelCase , lowerCamelCase = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
lowerCamelCase = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
# create random PyTorch tensors
lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCamelCase = image_processing_a.pad(A , return_tensors="""pt""" )
lowerCamelCase = image_processing_a(A , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCamelCase = json.loads(f.read() )
lowerCamelCase = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
lowerCamelCase = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
lowerCamelCase = image_processing(images=A , annotations=A , return_tensors="""pt""" )
# verify pixel values
lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , A )
lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A ) )
# verify boxes
lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A )
lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A ) )
# verify is_crowd
lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A ) )
# verify class_labels
lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A ) )
# verify orig_size
lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A ) )
# verify size
lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A ) )
@slow
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCamelCase = json.loads(f.read() )
lowerCamelCase = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
lowerCamelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCamelCase = YolosImageProcessor(format="""coco_panoptic""" )
lowerCamelCase = image_processing(images=A , annotations=A , masks_path=A , return_tensors="""pt""" )
# verify pixel values
lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , A )
lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , A ) )
# verify boxes
lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , A )
lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , A ) )
# verify is_crowd
lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , A ) )
# verify class_labels
lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , A ) )
# verify masks
lowerCamelCase = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , A )
# verify orig_size
lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , A ) )
# verify size
lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , A ) )
| 252 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : Any = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "roc_bert"
def __init__( self : Tuple , UpperCAmelCase_ : Dict=3_0_5_2_2 , UpperCAmelCase_ : List[str]=7_6_8 , UpperCAmelCase_ : Any=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : List[str]=3_0_7_2 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : str=5_1_2 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=1e-12 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : List[Any]="absolute" , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=7_6_8 , UpperCAmelCase_ : Tuple=9_1_0 , UpperCAmelCase_ : Dict=5_1_2 , UpperCAmelCase_ : Any=2_4_8_5_8 , UpperCAmelCase_ : List[str]=True , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : Optional[int] = vocab_size
a : Optional[int] = max_position_embeddings
a : Union[str, Any] = hidden_size
a : Dict = num_hidden_layers
a : List[str] = num_attention_heads
a : int = intermediate_size
a : Tuple = hidden_act
a : Any = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : int = initializer_range
a : int = type_vocab_size
a : Dict = layer_norm_eps
a : List[str] = use_cache
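# pronunciation and shape embeddings are the RoCBert-specific additions on top of BERT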
a : Optional[Any] = enable_pronunciation
a : Union[str, Any] = enable_shape
a : Union[str, Any] = pronunciation_embed_dim
a : Optional[int] = pronunciation_vocab_size
a : int = shape_embed_dim
a : int = shape_vocab_size
a : Tuple = concat_input
a : str = position_embedding_type
a : Union[str, Any] = classifier_dropout
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
| 352 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
a : List[str] = RegressionModel()
a : Union[str, Any] = deepcopy(snake_case )
a : Dict = RegressionDataset(length=snake_case )
a : Dict = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
a , a : Optional[int] = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
a : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case : int ):
a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case )
return outputs
with accelerator.main_process_first():
a : Dict = dataset.map(
snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
a : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case : Optional[Any] ):
if use_longest:
return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
a : List[str] = get_dataloader(snake_case , not dispatch_batches )
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case )
a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
a : Dict = []
for batch in dataloader:
a , a : Any = batch.values()
with torch.no_grad():
a : Tuple = model(snake_case )
a , a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
a , a : Any = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]:
"""simple docstring"""
a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case )
a , a : int = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]:
"""simple docstring"""
a : int = evaluate.load('glue' , 'mrpc' )
a , a : Tuple = get_mrpc_setup(snake_case , snake_case )
# First do baseline
a , a , a : Tuple = setup['no']
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
a : List[Any] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch['labels'] )
a : Tuple = metric.compute()
# Then do distributed
a , a , a : Tuple = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : List[str] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
a : Optional[int] = batch['labels']
a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
a : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case , snake_case )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 345 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'wav2vec2'
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1e-5 , lowercase="group" , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=128 , lowercase=16 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase=320 , lowercase=2 , lowercase=0.1 , lowercase=100 , lowercase=256 , lowercase=256 , lowercase=0.1 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , lowercase=None , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A__ = hidden_size
A__ = feat_extract_norm
A__ = feat_extract_activation
A__ = list(lowercase )
A__ = list(lowercase )
A__ = list(lowercase )
A__ = conv_bias
A__ = num_conv_pos_embeddings
A__ = num_conv_pos_embedding_groups
A__ = len(self.conv_dim )
A__ = num_hidden_layers
A__ = intermediate_size
A__ = hidden_act
A__ = num_attention_heads
A__ = hidden_dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = feat_proj_dropout
A__ = final_dropout
A__ = layerdrop
A__ = layer_norm_eps
A__ = initializer_range
A__ = vocab_size
A__ = do_stable_layer_norm
A__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ = apply_spec_augment
A__ = mask_time_prob
A__ = mask_time_length
A__ = mask_time_min_masks
A__ = mask_feature_prob
A__ = mask_feature_length
A__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
A__ = num_codevectors_per_group
A__ = num_codevector_groups
A__ = contrastive_logits_temperature
A__ = feat_quantizer_dropout
A__ = num_negatives
A__ = codevector_dim
A__ = proj_codevector_dim
A__ = diversity_loss_weight
# ctc loss
A__ = ctc_loss_reduction
A__ = ctc_zero_infinity
# adapter
A__ = add_adapter
A__ = adapter_kernel_size
A__ = adapter_stride
A__ = num_adapter_layers
A__ = output_hidden_size or hidden_size
A__ = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
A__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
A__ = list(lowercase )
A__ = list(lowercase )
A__ = list(lowercase )
A__ = xvector_output_dim
@property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 68 |
import string
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> None:
'''simple docstring'''
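# brute-force all 26 possible Caesar shifts and print each candidate plaintext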
for key in range(len(string.ascii_uppercase ) ):
A__ = ""
for symbol in message:
if symbol in string.ascii_uppercase:
A__ = string.ascii_uppercase.find(SCREAMING_SNAKE_CASE_ )
A__ = num - key
if num < 0:
A__ = num + len(string.ascii_uppercase )
A__ = translated + string.ascii_uppercase[num]
else:
A__ = translated + symbol
print(F'Decryption using Key #{key}: {translated}' )
def lowerCAmelCase__ ( ) -> None:
'''simple docstring'''
A__ = input("Encrypted message: " )
A__ = message.upper()
decrypt(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 68 | 1 |
"""simple docstring"""
lowerCamelCase__ = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 182 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 182 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( __lowerCAmelCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , """width_multiplier""" ) )
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=64 , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_="swish" , lowerCAmelCase_=3 , lowerCAmelCase_=32 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.02 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=10 , lowerCAmelCase_=None , lowerCAmelCase_=0.25 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , ) -> List[str]:
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
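# the last hidden size scales with the width multiplier, rounded to a multiple of 8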
_A = make_divisible(5_12 * width_multiplier , divisor=8 )
_A = hidden_act
_A = conv_kernel_size
_A = output_stride
_A = classifier_dropout_prob
_A = use_labels
_A = is_training
_A = num_labels
_A = initializer_range
_A = scope
_A = width_multiplier
_A = ffn_dropout
_A = attn_dropout
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.num_labels )
_A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self ) -> List[Any]:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_A = MobileViTVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = self.num_labels
_A = MobileViTVaForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_A = self.num_labels
_A = MobileViTVaForSemanticSegmentation(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_A = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase ( self ) -> Any:
_A = self.prepare_config_and_inputs()
_A , _A , _A , _A = config_and_inputs
_A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase :List[str] = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Dict = False
lowerCamelCase :Tuple = False
lowerCamelCase :Dict = False
def UpperCAmelCase ( self ) -> Tuple:
_A = MobileViTVaModelTester(self )
_A = MobileViTVaConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def UpperCAmelCase ( self ) -> int:
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def UpperCAmelCase ( self ) -> int:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def UpperCAmelCase ( self ) -> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(lowerCAmelCase_ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_A = outputs.hidden_states
_A = 5
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_A = 2
for i in range(len(lowerCAmelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> List[str]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = MobileViTVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case ( ) -> Any:
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> int:
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> List[str]:
_A = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
lowerCAmelCase_ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A = model(**lowerCAmelCase_ )
# verify the logits
_A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_A = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ) -> Dict:
_A = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
_A = model.to(lowerCAmelCase_ )
_A = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
_A = prepare_img()
_A = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A = model(**lowerCAmelCase_ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
_A = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=lowerCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCAmelCase ( self ) -> str:
_A = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
_A = model.to(lowerCAmelCase_ )
_A = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
_A = prepare_img()
_A = image_processor(images=lowerCAmelCase_ , return_tensors="""pt""" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A = model(**lowerCAmelCase_ )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ , target_sizes=[(50, 60)] )
_A = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
_A = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ )
_A = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
| 180 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
_A = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
_A = tempfile.mkdtemp()
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_A = os.path.join(self.tmpdirname , lowerCAmelCase_ )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
# load decoder from hub
_A = """hf-internal-testing/ngram-beam-search-decoder"""
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Tuple:
_A = self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCAmelCase_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Any:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Any:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(lowerCAmelCase_ , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = floats_list((3, 10_00) )
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor(lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = """This is a test string"""
_A = processor(text=lowerCAmelCase_ )
_A = tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self , lowerCAmelCase_=(2, 10, 16) , lowerCAmelCase_=77 ) -> Tuple:
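# seeded random logits keep the decoding tests deterministic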
np.random.seed(lowerCAmelCase_ )
return np.random.rand(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(lowerCAmelCase_ )
_A = decoder.decode_beams(lowerCAmelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(lowerCAmelCase_ )
else:
with get_context(lowerCAmelCase_ ).Pool() as pool:
_A = processor.batch_decode(lowerCAmelCase_ , lowerCAmelCase_ )
_A = list(lowerCAmelCase_ )
with get_context("""fork""" ).Pool() as p:
_A = decoder.decode_beams_batch(lowerCAmelCase_ , lowerCAmelCase_ )
_A , _A , _A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.lm_score )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_A = decoded_processor_out.text
_A = list(lowerCAmelCase_ )
with get_context("""fork""" ).Pool() as pool:
_A = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , lowerCAmelCase_ )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , lowerCAmelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , lowerCAmelCase_ , atol=1E-3 ) )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
lowerCAmelCase_ , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
_A = decoded_processor_out.text
_A = list(lowerCAmelCase_ )
decoder.reset_params(
alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
with get_context("""fork""" ).Pool() as pool:
_A = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , lowerCAmelCase_ )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_A = os.listdir(lowerCAmelCase_ )
_A = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_A = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase_ )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_A = os.listdir(lowerCAmelCase_ )
_A = os.listdir(lowerCAmelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor_auto(lowerCAmelCase_ , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(lowerCAmelCase_ )
_A = processor_auto.batch_decode(lowerCAmelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = [d[key] for d in offsets]
return retrieved_list
    def test_offsets_integration_fast(self) -> Any:
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self) -> Any:
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self) -> Any:
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        # the ratio of model stride to sampling rate converts frame offsets to seconds
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 180 | 1 |
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5 | 357 |
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
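
# Example (illustrative sanity check for the function above, shown doctest-style):
# >>> fizz_buzz(1, 7)
# '1 2 Fizz 4 Buzz Fizz 7 '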
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 0 |
"""simple docstring"""
def A ( snake_case :str , snake_case :int ) -> list:
__UpperCamelCase = word.split()
def justify(snake_case :list , snake_case :int , snake_case :int ) -> str:
__UpperCamelCase = max_width - width
__UpperCamelCase = len(snake_case )
if len(snake_case ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
__UpperCamelCase = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
__UpperCamelCase = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
__UpperCamelCase = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(snake_case ):
num_spaces_between_words_list[i] += 1
__UpperCamelCase = []
for i in range(snake_case ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(snake_case )
__UpperCamelCase = []
__UpperCamelCase = []
__UpperCamelCase = 0
for word in words:
if width + len(snake_case ) + len(snake_case ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(snake_case )
width += len(snake_case )
else:
# justify the line and add it to result
answer.append(justify(snake_case , snake_case , snake_case ) )
# reset new line and new width
__UpperCamelCase , __UpperCamelCase = [word], len(snake_case )
__UpperCamelCase = max_width - width - len(snake_case )
answer.append(' '.join(snake_case ) + (remaining_spaces + 1) * ' ' )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
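
# Example (illustrative, mirroring the upstream doctest for this algorithm):
# >>> text_justification("This is an example of text justification.", 16)
# ['This    is    an', 'example  of text', 'justification.  ']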
| 316 |
"""simple docstring"""
def A ( snake_case :int ) -> int:
__UpperCamelCase = [1]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0
__UpperCamelCase = ugly_nums[ia] * 2
__UpperCamelCase = ugly_nums[ia] * 3
__UpperCamelCase = ugly_nums[ia] * 5
for _ in range(1 , snake_case ):
__UpperCamelCase = min(snake_case , snake_case , snake_case )
ugly_nums.append(snake_case )
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__UpperCamelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_0_0) = }''')
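
# Quick check (illustrative): the ugly-number sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so ugly_numbers(10) == 12.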
| 316 | 1 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_A = """src/transformers"""
_A = """docs/source/en"""
_A = """."""
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
with open(lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCAmelCase__ : Dict = f.readlines()
# Find the start prompt.
UpperCAmelCase__ : Optional[Any] = 0
while not lines[start_index].startswith(lowerCAmelCase ):
start_index += 1
start_index += 1
UpperCAmelCase__ : str = start_index
while not lines[end_index].startswith(lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_A = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
_A = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
_A = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_A = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
_A = direct_transformers_import(TRANSFORMERS_PATH)
def a__ ( lowerCAmelCase ) -> Optional[Any]:
UpperCAmelCase__ : Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , lowerCAmelCase )
return [m.group(0 ) for m in matches]
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
UpperCAmelCase__ : Optional[Any] = 2 if text == """✅""" or text == """❌""" else len(lowerCAmelCase )
UpperCAmelCase__ : str = (width - text_length) // 2
UpperCAmelCase__ : Any = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def a__ ( ) -> List[Any]:
UpperCAmelCase__ : Tuple = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
UpperCAmelCase__ : Dict = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
UpperCAmelCase__ : Dict = {name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
UpperCAmelCase__ : str = collections.defaultdict(lowerCAmelCase )
UpperCAmelCase__ : str = collections.defaultdict(lowerCAmelCase )
UpperCAmelCase__ : str = collections.defaultdict(lowerCAmelCase )
UpperCAmelCase__ : Any = collections.defaultdict(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = collections.defaultdict(lowerCAmelCase )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCAmelCase ):
UpperCAmelCase__ : Dict = None
if attr_name.endswith("""Tokenizer""" ):
UpperCAmelCase__ : Tuple = slow_tokenizers
UpperCAmelCase__ : Dict = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
UpperCAmelCase__ : List[str] = fast_tokenizers
UpperCAmelCase__ : Any = attr_name[:-13]
elif _re_tf_models.match(lowerCAmelCase ) is not None:
UpperCAmelCase__ : Optional[int] = tf_models
UpperCAmelCase__ : Tuple = _re_tf_models.match(lowerCAmelCase ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase ) is not None:
UpperCAmelCase__ : Union[str, Any] = flax_models
UpperCAmelCase__ : List[str] = _re_flax_models.match(lowerCAmelCase ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase ) is not None:
UpperCAmelCase__ : Optional[int] = pt_models
UpperCAmelCase__ : str = _re_pt_models.match(lowerCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase ) > 0:
if attr_name in model_name_to_prefix.values():
UpperCAmelCase__ : List[str] = True
break
# Try again after removing the last word in the name
UpperCAmelCase__ : Dict = """""".join(camel_case_split(lowerCAmelCase )[:-1] )
# Let's build that table!
UpperCAmelCase__ : Optional[Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
UpperCAmelCase__ : Optional[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
UpperCAmelCase__ : Optional[int] = [len(lowerCAmelCase ) + 2 for c in columns]
UpperCAmelCase__ : Dict = max([len(lowerCAmelCase ) for name in model_names] ) + 2
# Build the table per se
UpperCAmelCase__ : Union[str, Any] = """|""" + """|""".join([_center_text(lowerCAmelCase , lowerCAmelCase ) for c, w in zip(lowerCAmelCase , lowerCAmelCase )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
UpperCAmelCase__ : Optional[int] = {True: """✅""", False: """❌"""}
for name in model_names:
UpperCAmelCase__ : Dict = model_name_to_prefix[name]
UpperCAmelCase__ : Union[str, Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCAmelCase , lowerCAmelCase ) for l, w in zip(lowerCAmelCase , lowerCAmelCase )] ) + "|\n"
return table
def a__ ( lowerCAmelCase=False ) -> Optional[Any]:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = _find_text_in_file(
filename=os.path.join(lowerCAmelCase , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , )
UpperCAmelCase__ : str = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCAmelCase , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_A = parser.parse_args()
check_model_table(args.fix_and_overwrite)
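
# Typical invocations (per the path comment at the top of this script, run from the repo root):
#   python utils/check_table.py                      # raises if the doc table is stale
#   python utils/check_table.py --fix_and_overwrite  # regenerates the table in docs/source/en/index.md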
| 355 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_hidden_layers = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image


@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 166 | 0 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
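
# Illustrative instantiation (the hyperparameter values below are assumptions for
# this sketch, not taken from the file): the encoder maps a batch of note tokens
# plus a boolean mask to per-token embeddings and passes the mask through unchanged.
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 1536, (1, 2048))
#   mask = torch.ones(1, 2048, dtype=torch.bool)
#   embeddings, mask = encoder(tokens, mask)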
| 42 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 102 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_A = logging.get_logger(__name__)
class _lowercase ( __UpperCAmelCase ):
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
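
# Usage note (sketch): the deprecated alias keeps old code working —
# LayoutLMv2FeatureExtractor() still constructs a fully functional
# LayoutLMv2ImageProcessor, but emits a FutureWarning pointing callers
# at the replacement class before delegating to the parent constructor.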
| 351 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'big_bird'
def __init__( self , UpperCAmelCase_=50358 , UpperCAmelCase_=768 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=3072 , UpperCAmelCase_="gelu_new" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=4096 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=66 , UpperCAmelCase_="block_sparse" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=64 , UpperCAmelCase_=3 , UpperCAmelCase_=None , **UpperCAmelCase_ , ) -> Union[str, Any]:
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , sep_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase : Tuple = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : str = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : Optional[int] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : int = initializer_range
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Union[str, Any] = layer_norm_eps
lowerCamelCase : int = use_cache
lowerCamelCase : Tuple = rescale_embeddings
lowerCamelCase : Dict = attention_type
lowerCamelCase : Optional[int] = use_bias
lowerCamelCase : Optional[int] = block_size
lowerCamelCase : Tuple = num_random_blocks
lowerCamelCase : List[Any] = classifier_dropout
class _lowercase ( __UpperCAmelCase ):
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 205 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 336 |
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes using breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
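

# A deque-based variant (illustrative sketch, not part of the original module):
# list.pop(0) above is O(n) per dequeue, while collections.deque.popleft() is O(1),
# which matters on large graphs. The breadth-first traversal is otherwise identical.
from collections import deque


def bfs_shortest_path_distance_deque(graph: dict, start, target) -> int:
    """Return the hop count from start to target, or -1 if target is unreachable."""
    if start == target:
        return 0
    queue, dist = deque([start]), {start: 0}
    while queue:
        node = queue.popleft()
        for adjacent in graph[node]:
            if adjacent not in dist:
                dist[adjacent] = dist[node] + 1
                queue.append(adjacent)
    return dist.get(target, -1)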
| 121 | 0 |
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers in place, digit by digit (least significant first)."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
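
# Example (mirrors the upstream doctests for this algorithm, shown for reference):
# >>> radix_sort([0, 5, 3, 2, 2])
# [0, 2, 2, 3, 5]
# >>> radix_sort(list(range(15))) == sorted(range(15))
# True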
| 42 |
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 42 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
    def __lowerCAmelCase ( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ) -> Union[str, Any]:
        if NLTK_VERSION >= version.Version("""3.6.5""" ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 84 |
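As a usage sketch of the nltk scorer this metric wraps (assuming nltk >= 3.6.5 with the wordnet and punkt resources downloaded, as the download step above arranges):

from nltk import word_tokenize
from nltk.translate import meteor_score

pred = "It is a guide to action which ensures that the military always obeys the commands of the party"
ref = "It is a guide to action that ensures that the military will forever heed Party commands"
score = meteor_score.single_meteor_score(
    word_tokenize(ref), word_tokenize(pred), alpha=0.9, beta=3, gamma=0.5
)
print(round(score, 4))  # ~0.6944, matching the docstring example above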
from itertools import permutations
def is_substring_divisible( num ) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution( n = 10 ) -> int:
    return sum(
        int(''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 327 | 0 |
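A quick worked check: 1406357289, the example given in the Project Euler 43 statement, passes every test in is_substring_divisible above:

digits = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9)  # the pandigital 1406357289
# 406 % 2, 063 % 3, 635 % 5, 357 % 7, 572 % 11, 728 % 13, 289 % 17 are all zero
print(is_substring_divisible(digits))  # True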
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 262 |
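An aside on the one-liner above: %r substitutes the repr of the template — quotes and the collapsed %% included — so feeding the template string to itself reproduces the program's own source exactly:

q = 'print((lambda quine: quine %% quine)(%r))'
print(q % q)  # prints the one-line program above, character for character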
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines( lines ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"""#.*""" , """""" , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
UpperCAmelCase__ : str ={
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
UpperCAmelCase__ : Optional[Any] ={'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 262 | 1 |
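A small sketch of what _hash_python_lines contributes to caching — full-line comments and blank lines are stripped before hashing, so purely cosmetic edits keep a module's hash stable:

with_comments = ["x = 1", "# a comment", "", "y = x + 1"]
without = ["x = 1", "y = x + 1"]
assert _hash_python_lines(with_comments) == _hash_python_lines(without)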
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Union[str, Any] = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = '''markuplm'''
def __init__( self : Dict , __lowerCAmelCase : Union[str, Any]=3_05_22 , __lowerCAmelCase : List[str]=7_68 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : str=30_72 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Union[str, Any]=5_12 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : Dict=1e-12 , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : List[Any]=2_56 , __lowerCAmelCase : Dict=10_24 , __lowerCAmelCase : Tuple=2_16 , __lowerCAmelCase : Union[str, Any]=10_01 , __lowerCAmelCase : int=32 , __lowerCAmelCase : str=50 , __lowerCAmelCase : List[str]="absolute" , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Any , ) -> Any:
"""simple docstring"""
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
# additional properties
A__ = max_depth
A__ = max_xpath_tag_unit_embeddings
A__ = max_xpath_subs_unit_embeddings
A__ = tag_pad_id
A__ = subs_pad_id
A__ = xpath_unit_hidden_size
| 274 |
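A hedged usage sketch of the config above (MarkupLMConfig/MarkupLMModel are the public transformers names; the overrides shown simply restate defaults set in __init__):

from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
model = MarkupLMModel(config)
print(model.config.max_xpath_tag_unit_embeddings)  # 256, the default above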
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : int = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : Any = '''xlm-roberta'''
def __init__( self : Optional[Any] , __lowerCAmelCase : List[Any]=3_05_22 , __lowerCAmelCase : int=7_68 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Union[str, Any]=30_72 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-12 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Tuple="absolute" , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=None , **__lowerCAmelCase : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
A__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 274 | 1 |
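The ONNX config above only declares which axes are dynamic; for any task other than multiple-choice, its inputs property resolves to the following mapping:

from collections import OrderedDict

expected = OrderedDict([
    ("input_ids", {0: "batch", 1: "sequence"}),
    ("attention_mask", {0: "batch", 1: "sequence"}),
])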
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
_SCREAMING_SNAKE_CASE = {F"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = FunnelTokenizer
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = 2
def __init__( self : Any , __snake_case : str=None , __snake_case : List[str]=None , __snake_case : Optional[Any]=True , __snake_case : Union[str, Any]="<unk>" , __snake_case : Any="<sep>" , __snake_case : Optional[Any]="<pad>" , __snake_case : Dict="<cls>" , __snake_case : int="<mask>" , __snake_case : List[Any]="<s>" , __snake_case : Any="</s>" , __snake_case : Union[str, Any]=True , __snake_case : Any=True , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]="##" , **__snake_case : Dict , )-> Optional[Any]:
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , clean_text=__snake_case , tokenize_chinese_chars=__snake_case , strip_accents=__snake_case , wordpieces_prefix=__snake_case , **__snake_case , )
snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __snake_case ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __snake_case ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __snake_case ) != tokenize_chinese_chars
):
snake_case = getattr(__snake_case , normalizer_state.pop("""type""" ) )
snake_case = do_lower_case
snake_case = strip_accents
snake_case = tokenize_chinese_chars
snake_case = normalizer_class(**__snake_case )
snake_case = do_lower_case
    def lowerCAmelCase ( self : Dict , token_ids_0 , token_ids_1=None )-> List[str]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def lowerCAmelCase ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None )-> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def lowerCAmelCase ( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None )-> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 3 |
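A worked trace of the token-type-id layout above — Funnel marks [CLS] with its own type id (cls_token_type_id = 2) instead of BERT's 0. For a 3-token first segment and a 2-token second segment:

# [CLS]        -> [2]
# ids_0 + [SEP] -> [0, 0, 0, 0]
# ids_1 + [SEP] -> [1, 1, 1]
# result: [2, 0, 0, 0, 0, 1, 1, 1]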
'''simple docstring'''
def multiplicative_persistence( num : int ) -> int:
    if not isinstance(num , int ):
        raise ValueError("""multiplicative_persistence() only accepts integral values""" )
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence( num : int ) -> int:
    if not isinstance(num , int ):
        raise ValueError("""additive_persistence() only accepts integral values""" )
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
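Worked examples for the two persistence functions above:

print(multiplicative_persistence(39))  # 39 -> 27 -> 14 -> 4, so 3
print(additive_persistence(199))       # 199 -> 19 -> 10 -> 1, so 3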
"""simple docstring"""
def is_palindrome( head ):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack( head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict( head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 46 |
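The three checks above assume a singly linked node exposing .val and .next; a minimal sketch of that node type plus a call (the ListNode class is an assumption, not part of the original file):

class ListNode:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt

head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
print(is_palindrome_stack(head))  # True: 1 -> 2 -> 2 -> 1 reads the same reversed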
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'summarization'
_SCREAMING_SNAKE_CASE = ['loss']
_SCREAMING_SNAKE_CASE = ROUGE_KEYS
_SCREAMING_SNAKE_CASE = 'rouge2'
def __init__( self , lowercase , **lowercase ) -> str:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
lowerCAmelCase = Path(self.output_dir ) / """metrics.json"""
lowerCAmelCase = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
lowerCAmelCase = 0
lowerCAmelCase = defaultdict(lowercase )
lowerCAmelCase = self.config.model_type
lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
lowerCAmelCase = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCAmelCase = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase = get_git_info()["""repo_sha"""]
lowerCAmelCase = hparams.num_workers
lowerCAmelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ):
lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase = self.decoder_start_token_id
lowerCAmelCase = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
lowerCAmelCase = False
lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase = self.hparams.eval_max_gen_length
else:
lowerCAmelCase = self.model.config.max_length
lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _snake_case ( self , lowercase ) -> Dict[str, List[str]]:
lowerCAmelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(lowercase , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
lowerCAmelCase = True
return readable_batch
def _snake_case ( self , lowercase , **lowercase ) -> Union[str, Any]:
return self.model(lowercase , **lowercase )
def _snake_case ( self , lowercase ) -> Union[str, Any]:
lowerCAmelCase = self.tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
return lmap(str.strip , lowercase )
def _snake_case ( self , lowercase ) -> Tuple:
lowerCAmelCase = self.tokenizer.pad_token_id
lowerCAmelCase , lowerCAmelCase = batch["""input_ids"""], batch["""attention_mask"""]
lowerCAmelCase = batch["""labels"""]
if isinstance(self.model , lowercase ):
lowerCAmelCase = self.model._shift_right(lowercase )
else:
lowerCAmelCase = shift_tokens_right(lowercase , lowercase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCAmelCase = decoder_input_ids
self.save_readable_batch(lowercase )
lowerCAmelCase = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase )
lowerCAmelCase = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=lowercase )
assert lm_logits.shape[-1] == self.vocab_size
lowerCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCAmelCase = nn.functional.log_softmax(lowercase , dim=-1 )
lowerCAmelCase , lowerCAmelCase = label_smoothed_nll_loss(
lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase )
return (loss,)
@property
def _snake_case ( self ) -> int:
return self.tokenizer.pad_token_id
def _snake_case ( self , lowercase , lowercase ) -> Dict:
lowerCAmelCase = self._step(lowercase )
lowerCAmelCase = dict(zip(self.loss_names , lowercase ) )
# tokens per batch
lowerCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
lowerCAmelCase = batch["""input_ids"""].shape[0]
lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).sum()
lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return self._generative_step(lowercase )
def _snake_case ( self , lowercase , lowercase="val" ) -> Dict:
self.step_count += 1
lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCAmelCase = losses["""loss"""]
lowerCAmelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
lowerCAmelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCAmelCase = torch.tensor(lowercase ).type_as(lowercase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase )
lowerCAmelCase = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
lowerCAmelCase = self.step_count
self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path
lowerCAmelCase = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return calculate_rouge(lowercase , lowercase )
def _snake_case ( self , lowercase ) -> dict:
lowerCAmelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCAmelCase = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0]
lowerCAmelCase = self.ids_to_clean_text(lowercase )
lowerCAmelCase = self.ids_to_clean_text(batch["""labels"""] )
lowerCAmelCase = self._step(lowercase )
lowerCAmelCase = dict(zip(self.loss_names , lowercase ) )
lowerCAmelCase = self.calc_generative_metrics(lowercase , lowercase )
lowerCAmelCase = np.mean(lmap(lowercase , lowercase ) )
base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase )
return base_metrics
def _snake_case ( self , lowercase , lowercase ) -> Dict:
return self._generative_step(lowercase )
def _snake_case ( self , lowercase ) -> int:
return self.validation_epoch_end(lowercase , prefix="""test""" )
def _snake_case ( self , lowercase ) -> SeqaSeqDataset:
lowerCAmelCase = self.n_obs[type_path]
lowerCAmelCase = self.target_lens[type_path]
lowerCAmelCase = self.dataset_class(
self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , )
return dataset
def _snake_case ( self , lowercase , lowercase , lowercase = False ) -> DataLoader:
lowerCAmelCase = self.get_dataset(lowercase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCAmelCase = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCAmelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , )
def _snake_case ( self ) -> DataLoader:
lowerCAmelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowercase )
return dataloader
def _snake_case ( self ) -> DataLoader:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def _snake_case ( self ) -> DataLoader:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _snake_case ( lowercase , lowercase ) -> Optional[int]:
BaseTransformer.add_model_specific_args(lowercase , lowercase )
add_generic_args(lowercase , lowercase )
parser.add_argument(
"""--max_source_length""" , default=1_024 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowercase )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowercase )
parser.add_argument("""--max_tokens_per_batch""" , type=lowercase , default=lowercase )
parser.add_argument("""--logger_name""" , type=lowercase , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowercase , default=500 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowercase , default="""summarization""" , required=lowercase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowercase , default=0.0 , required=lowercase )
parser.add_argument("""--src_lang""" , type=lowercase , default="""""" , required=lowercase )
parser.add_argument("""--tgt_lang""" , type=lowercase , default="""""" , required=lowercase )
parser.add_argument("""--eval_beams""" , type=lowercase , default=lowercase , required=lowercase )
parser.add_argument(
"""--val_metric""" , type=lowercase , default=lowercase , required=lowercase , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowercase , default=lowercase , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowercase , default=1 , required=lowercase , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowercase , default=-1 , required=lowercase , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'translation'
_SCREAMING_SNAKE_CASE = ['loss']
_SCREAMING_SNAKE_CASE = ['bleu']
_SCREAMING_SNAKE_CASE = 'bleu'
def __init__( self , lowercase , **lowercase ) -> Union[str, Any]:
super().__init__(lowercase , **lowercase )
lowerCAmelCase = hparams.src_lang
lowerCAmelCase = hparams.tgt_lang
def _snake_case ( self , lowercase , lowercase ) -> dict:
return calculate_bleu(lowercase , lowercase )
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
check_output_dir(SCREAMING_SNAKE_CASE , expected_items=3 )
if model is None:
if "summarization" in args.task:
lowerCAmelCase = SummarizationModule(SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase = TranslationModule(SCREAMING_SNAKE_CASE )
lowerCAmelCase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
lowerCAmelCase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
lowerCAmelCase = os.environ.get("""WANDB_PROJECT""" , SCREAMING_SNAKE_CASE )
lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' )
if args.early_stopping_patience >= 0:
lowerCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
lowerCAmelCase = False
lowerCAmelCase = args.val_metric == """loss"""
lowerCAmelCase = generic_train(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE ) , early_stopping_callback=SCREAMING_SNAKE_CASE , logger=SCREAMING_SNAKE_CASE , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
lowerCAmelCase = """"""
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=SCREAMING_SNAKE_CASE ) )
if checkpoints:
lowerCAmelCase = checkpoints[-1]
lowerCAmelCase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE__ = pl.Trainer.add_argparse_args(parser)
SCREAMING_SNAKE_CASE__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
| 46 | 1 |
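A hypothetical programmatic invocation mirroring the __main__ block above (paths and model choice are placeholders; --model_name_or_path comes from the generic lightning_base arguments, which is an assumption here):

import argparse, os
import pytorch_lightning as pl

parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args([
    "--data_dir", "./cnn_dm",
    "--output_dir", "./outputs",
    "--model_name_or_path", "t5-small",
    "--n_val", "500",
])
main(args)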
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowercase ( self : int ):
_a : Union[str, Any] = 1
_a : List[str] = 3
_a : Dict = (32, 32)
_a : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def __lowercase ( self : Dict ):
torch.manual_seed(0 )
_a : Any = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
return model
@property
def __lowercase ( self : List[str] ):
torch.manual_seed(0 )
_a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
return model
@property
def __lowercase ( self : Optional[int] ):
torch.manual_seed(0 )
_a : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(__UpperCAmelCase )
@property
def __lowercase ( self : Optional[Any] ):
def extract(*_UpperCAmelCase : Dict ,**_UpperCAmelCase : Union[str, Any] ):
class __magic_name__ :
def __init__( self : List[str] ):
_a : str = torch.ones([0] )
def __lowercase ( self : Dict ,_UpperCAmelCase : List[Any] ):
self.pixel_values.to(__UpperCAmelCase )
return self
return Out()
return extract
def __lowercase ( self : Union[str, Any] ):
_a : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : Union[str, Any] = self.dummy_cond_unet
_a : Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=__UpperCAmelCase ,set_alpha_to_one=__UpperCAmelCase ,)
_a : int = self.dummy_vae
_a : Tuple = self.dummy_text_encoder
_a : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_a : List[str] = StableDiffusionPipeline(
unet=__UpperCAmelCase ,scheduler=__UpperCAmelCase ,vae=__UpperCAmelCase ,text_encoder=__UpperCAmelCase ,tokenizer=__UpperCAmelCase ,safety_checker=__UpperCAmelCase ,feature_extractor=self.dummy_extractor ,)
_a : str = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a : Optional[Any] = """A painting of a squirrel eating a burger"""
_a : str = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a : List[Any] = sd_pipe([prompt] ,generator=__UpperCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' )
_a : Union[str, Any] = output.images
_a : Tuple = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a : str = sd_pipe(
[prompt] ,generator=__UpperCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' ,return_dict=__UpperCAmelCase ,)[0]
_a : str = image[0, -3:, -3:, -1]
_a : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : List[Any] = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : int ):
_a : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
_a : List[str] = self.dummy_cond_unet
_a : Union[str, Any] = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
_a : str = self.dummy_vae
_a : List[str] = self.dummy_text_encoder
_a : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_a : str = StableDiffusionPipeline(
unet=__UpperCAmelCase ,scheduler=__UpperCAmelCase ,vae=__UpperCAmelCase ,text_encoder=__UpperCAmelCase ,tokenizer=__UpperCAmelCase ,safety_checker=__UpperCAmelCase ,feature_extractor=self.dummy_extractor ,)
_a : Union[str, Any] = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a : Tuple = """A painting of a squirrel eating a burger"""
_a : Union[str, Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a : List[Any] = sd_pipe([prompt] ,generator=__UpperCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' )
_a : Any = output.images
_a : Tuple = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_a : Optional[Any] = sd_pipe(
[prompt] ,generator=__UpperCAmelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='np' ,return_dict=__UpperCAmelCase ,)[0]
_a : str = image[0, -3:, -3:, -1]
_a : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : List[str] = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Any ):
_a : List[Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' ,safety_checker=__UpperCAmelCase )
assert isinstance(__UpperCAmelCase ,__UpperCAmelCase )
assert isinstance(pipe.scheduler ,__UpperCAmelCase )
assert pipe.safety_checker is None
_a : List[Any] = pipe('example prompt' ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
_a : Dict = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_a : int = pipe('example prompt' ,num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' ,'This test requires a GPU' )
def __lowercase ( self : Tuple ):
_a : List[str] = self.dummy_cond_unet
_a : Optional[int] = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
_a : List[str] = self.dummy_vae
_a : str = self.dummy_text_encoder
_a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_a : List[Any] = unet.half()
_a : List[Any] = vae.half()
_a : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
_a : int = StableDiffusionPipeline(
unet=__UpperCAmelCase ,scheduler=__UpperCAmelCase ,vae=__UpperCAmelCase ,text_encoder=__UpperCAmelCase ,tokenizer=__UpperCAmelCase ,safety_checker=__UpperCAmelCase ,feature_extractor=self.dummy_extractor ,)
_a : Any = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a : str = """A painting of a squirrel eating a burger"""
_a : str = sd_pipe([prompt] ,num_inference_steps=2 ,output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Optional[int] ):
_a : Union[str, Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ,safety_checker=__UpperCAmelCase )
_a : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a : List[Any] = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a : Tuple = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_a : int = 4003660346
_a : List[str] = 7
# without safety guidance (sld_guidance_scale = 0)
_a : List[str] = torch.manual_seed(__UpperCAmelCase )
_a : Tuple = sd_pipe(
[prompt] ,generator=__UpperCAmelCase ,guidance_scale=__UpperCAmelCase ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_a : Union[str, Any] = output.images
_a : Dict = image[0, -3:, -3:, -1]
_a : Union[str, Any] = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
_a : str = torch.manual_seed(__UpperCAmelCase )
_a : Tuple = sd_pipe(
[prompt] ,generator=__UpperCAmelCase ,guidance_scale=__UpperCAmelCase ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_a : Optional[int] = output.images
_a : Dict = image[0, -3:, -3:, -1]
_a : List[str] = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : List[str] ):
_a : List[str] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ,safety_checker=__UpperCAmelCase )
_a : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a : int = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a : Dict = """padme amidala taking a bath artwork, safe for work, no nudity"""
_a : Optional[Any] = 2734971755
_a : Any = 7
_a : List[Any] = torch.manual_seed(__UpperCAmelCase )
_a : int = sd_pipe(
[prompt] ,generator=__UpperCAmelCase ,guidance_scale=__UpperCAmelCase ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_a : Optional[Any] = output.images
_a : Dict = image[0, -3:, -3:, -1]
_a : Optional[Any] = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_a : Any = torch.manual_seed(__UpperCAmelCase )
_a : str = sd_pipe(
[prompt] ,generator=__UpperCAmelCase ,guidance_scale=__UpperCAmelCase ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_a : str = output.images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : List[Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Union[str, Any] ):
_a : Union[str, Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_a : List[Any] = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a : Optional[Any] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_a : Optional[int] = 1044355234
_a : int = 12
_a : Any = torch.manual_seed(__UpperCAmelCase )
_a : Optional[Any] = sd_pipe(
[prompt] ,generator=__UpperCAmelCase ,guidance_scale=__UpperCAmelCase ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=0 ,)
_a : int = output.images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : List[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_a : List[str] = torch.manual_seed(__UpperCAmelCase )
_a : Union[str, Any] = sd_pipe(
[prompt] ,generator=__UpperCAmelCase ,guidance_scale=__UpperCAmelCase ,num_inference_steps=50 ,output_type='np' ,width=512 ,height=512 ,sld_guidance_scale=2000 ,sld_warmup_steps=7 ,sld_threshold=0.0_25 ,sld_momentum_scale=0.5 ,sld_mom_beta=0.7 ,)
_a : Tuple = output.images
_a : int = image[0, -3:, -3:, -1]
_a : List[str] = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 366 |
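A hedged usage sketch of the safe pipeline exercised by the tests above (the checkpoint id and sld_* arguments are taken from the test calls; treat the exact values as illustrative):

import torch
from diffusers import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
generator = torch.manual_seed(0)
image = pipe(
    "portrait photo of a city at night",
    generator=generator,
    num_inference_steps=50,
    sld_guidance_scale=2000,  # 0 disables safe latent diffusion, as the first test case shows
    sld_warmup_steps=7,
    sld_threshold=0.025,
).images[0]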
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word ) -> str:
    return "".join(sorted(word ) )
def anagram( my_word ) -> list[str]:
    return word_by_signature[signature(my_word )]
__lowerCAmelCase = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
__lowerCAmelCase = sorted({word.strip().lower() for word in data.splitlines()})
__lowerCAmelCase = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__lowerCAmelCase = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('''anagrams.txt''', '''w''') as file:
file.write('''all_anagrams = \n ''')
file.write(pprint.pformat(all_anagrams))
| 107 | 0 |
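A quick worked example of the signature/anagram pair above (the second line's exact output depends on the contents of words.txt):

print(signature("pots"))  # 'opst' -- the sorted characters act as the anagram key
print(anagram("pots"))    # e.g. ['opts', 'post', 'pots', 'spot', 'stop', 'tops']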
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :int = tempfile.mkdtemp()
# fmt: off
__UpperCamelCase :int = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__UpperCamelCase :Tuple = dict(zip(__lowercase , range(len(__lowercase))))
__UpperCamelCase :List[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__UpperCamelCase :Optional[Any] = {'''unk_token''': '''<unk>'''}
__UpperCamelCase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCamelCase :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(__lowercase) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(__lowercase))
__UpperCamelCase :List[str] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
__UpperCamelCase :Any = os.path.join(self.tmpdirname , __lowercase)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(__lowercase , __lowercase)
def UpperCamelCase__ ( self , **__lowercase) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase)
def UpperCamelCase__ ( self , **__lowercase) -> Any:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase)
def UpperCamelCase__ ( self , **__lowercase) -> List[str]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase)
def UpperCamelCase__ ( self) -> List[str]:
shutil.rmtree(self.tmpdirname)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__UpperCamelCase :int = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1)) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Dict = self.get_tokenizer()
__UpperCamelCase :str = self.get_rust_tokenizer()
__UpperCamelCase :Optional[Any] = self.get_image_processor()
__UpperCamelCase :List[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
processor_slow.save_pretrained(self.tmpdirname)
__UpperCamelCase :Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase)
__UpperCamelCase :Union[str, Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
processor_fast.save_pretrained(self.tmpdirname)
__UpperCamelCase :Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , __lowercase)
self.assertIsInstance(processor_fast.tokenizer , __lowercase)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , __lowercase)
self.assertIsInstance(processor_fast.image_processor , __lowercase)
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__UpperCamelCase :int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
__UpperCamelCase :List[Any] = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0)
__UpperCamelCase :Optional[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , __lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowercase)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Optional[int] = self.get_image_processor()
__UpperCamelCase :Optional[Any] = self.get_tokenizer()
__UpperCamelCase :Dict = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
__UpperCamelCase :Dict = self.prepare_image_inputs()
__UpperCamelCase :Union[str, Any] = image_processor(__lowercase , return_tensors='''np''')
__UpperCamelCase :str = processor(images=__lowercase , return_tensors='''np''')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Tuple = self.get_image_processor()
__UpperCamelCase :Dict = self.get_tokenizer()
__UpperCamelCase :str = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
__UpperCamelCase :Tuple = '''lower newer'''
__UpperCamelCase :str = processor(text=__lowercase)
__UpperCamelCase :Optional[Any] = tokenizer(__lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :str = self.get_image_processor()
__UpperCamelCase :Tuple = self.get_tokenizer()
__UpperCamelCase :Optional[Any] = CLIPSegProcessor(tokenizer=__lowercase , image_processor=__lowercase)
__UpperCamelCase :Optional[int] = '''lower newer'''
__UpperCamelCase :str = self.prepare_image_inputs()
__UpperCamelCase :Any = processor(text=__lowercase , images=__lowercase)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''conditional_pixel_values'''])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
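# A minimal, self-contained sketch of the save/load round-trip the tests above
# exercise, reduced to a toy processor with a JSON config; ToyProcessor and its
# file name are illustrative stand-ins, not part of the transformers API.
import json
import tempfile

class ToyProcessor:
    def __init__(self, vocab):
        self.vocab = vocab

    def save_pretrained(self, path):
        with open(os.path.join(path, "toy_processor.json"), "w") as f:
            json.dump({"vocab": self.vocab}, f)

    @classmethod
    def from_pretrained(cls, path):
        with open(os.path.join(path, "toy_processor.json")) as f:
            return cls(**json.load(f))

with tempfile.TemporaryDirectory() as tmpdir:
    original = ToyProcessor(vocab={"lower": 0, "newer": 1})
    original.save_pretrained(tmpdir)
    assert ToyProcessor.from_pretrained(tmpdir).vocab == original.vocab  # state survives the round trip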
| 43 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # drop a box if its height or width is smaller than this scale
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main():
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    '''simple docstring'''
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    '''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
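# The loader above converts YOLO-style (x_center, y_center, width, height) boxes
# to corner form, and main() converts them back once the mosaic is assembled. A
# standalone sketch of that round trip (values chosen to be exact in binary
# floating point so the equality check holds):
def yolo_to_corners(xc: float, yc: float, w: float, h: float) -> tuple:
    return xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2

def corners_to_yolo(xmin: float, ymin: float, xmax: float, ymax: float) -> tuple:
    return (xmin + xmax) / 2, (ymin + ymax) / 2, xmax - xmin, ymax - ymin

assert corners_to_yolo(*yolo_to_corners(0.5, 0.5, 0.25, 0.5)) == (0.5, 0.5, 0.25, 0.5)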
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 43 | 1 |
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    'scores': List of scores.\nExamples:\n\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> bleurt = datasets.load_metric(\"bleurt\")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results[\"scores\"]])\n    [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage='''https://github.com/google-research/bleurt''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''', id='''sequence''' ),
'''references''': datasets.Value('''string''', id='''sequence''' ),
} ), codebase_urls=['''https://github.com/google-research/bleurt'''], reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''], )
    def _download_and_prepare(self, dl_manager):
        '''simple docstring'''
        # resolve the checkpoint name from the (case-insensitive) config name
        if self.config_name == "default":
            logger.warning(
                '''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
                '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        '''simple docstring'''
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
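# A standalone sketch of the checkpoint-name resolution performed above: the
# config name is matched case-insensitively against the known checkpoints, and
# an unknown name raises a KeyError listing the valid choices.
def _resolve_checkpoint(name: str, urls: dict) -> str:
    if name.lower() in urls:
        return name.lower()
    if name.upper() in urls:
        return name.upper()
    raise KeyError(f"{name} not found; expected one of {sorted(urls)}")

assert _resolve_checkpoint("BLEURT-BASE-128", CHECKPOINT_URLS) == "bleurt-base-128"
assert _resolve_checkpoint("bleurt-20", CHECKPOINT_URLS) == "BLEURT-20"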
| 365 |
"""simple docstring"""
UpperCamelCase_ =[
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 128 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
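# A hedged variant of the helper above that adds a request timeout and the
# standard raise_for_status() check; the function name is illustrative only.
def send_slack_message_safe(message_body: str, slack_url: str, timeout: float = 10.0) -> None:
    response = requests.post(
        slack_url,
        json={"text": message_body},
        headers={"Content-Type": "application/json"},
        timeout=timeout,  # avoid hanging forever on an unresponsive webhook
    )
    response.raise_for_status()  # raises requests.HTTPError on any non-2xx status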
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 26 |
from __future__ import annotations
from typing import Generic, TypeVar
a_ = TypeVar("""T""")
class __lowerCAmelCase ( Generic[T] ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = data
__lowerCamelCase = self
__lowerCamelCase = 0
class __lowerCAmelCase ( Generic[T] ):
def __init__( self ):
'''simple docstring'''
# map from node name to the node object
__lowerCamelCase = {}
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
# create a new set with x as its member
__lowerCamelCase = DisjointSetTreeNode(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
# find the set x belongs to (with path-compression)
__lowerCamelCase = self.map[data]
if elem_ref != elem_ref.parent:
__lowerCamelCase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
# helper function for union operation
if nodea.rank > nodea.rank:
__lowerCamelCase = nodea
else:
__lowerCamelCase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
# merge 2 disjoint sets
self.link(self.find_set(__UpperCAmelCase ) , self.find_set(__UpperCAmelCase ) )
class __lowerCAmelCase ( Generic[T] ):
def __init__( self ):
'''simple docstring'''
# connections: map from the node to the neighbouring nodes (with weights)
__lowerCamelCase = {}
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
# add a node ONLY if its not present in the graph
if node not in self.connections:
__lowerCamelCase = {}
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
# add an edge with the given weight
self.add_node(__UpperCAmelCase )
self.add_node(__UpperCAmelCase )
__lowerCamelCase = weight
__lowerCamelCase = weight
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = []
__lowerCamelCase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda __UpperCAmelCase : x[2] )
# creating the disjoint set
__lowerCamelCase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__UpperCAmelCase )
# MST generation
__lowerCamelCase = 0
__lowerCamelCase = 0
__lowerCamelCase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = edges[index]
index += 1
__lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase )
__lowerCamelCase = disjoint_set.find_set(__UpperCAmelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
disjoint_set.union(__UpperCAmelCase , __UpperCAmelCase )
return graph
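# The class above implements Kruskal's algorithm over a union-find structure,
# but the method names were mangled in this copy. A compact, runnable
# restatement of the same idea (sort edges by weight; keep an edge iff it
# joins two different components):
def kruskal_mst(num_nodes: int, edges: list) -> list:
    parent = list(range(num_nodes))

    def find(u: int) -> int:
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving keeps trees shallow
            u = parent[u]
        return u

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append((u, v, w))
    return mst

# Triangle graph: the heaviest edge (0-2, weight 3) is left out of the MST.
assert kruskal_mst(3, [(0, 1, 1), (1, 2, 2), (0, 2, 3)]) == [(0, 1, 1), (1, 2, 2)]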
| 330 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
_lowerCamelCase : List[str] = TypeVar("KT")
_lowerCamelCase : str = TypeVar("VT")
class __UpperCAmelCase ( Generic[KT, VT] ):
def __init__( self : Union[str, Any], __A : KT | str = "root", __A : VT | None = None ):
UpperCAmelCase : List[Any] = key
UpperCAmelCase : int = value
UpperCAmelCase : list[Node[KT, VT]] = []
def __repr__( self : Union[str, Any] ):
return F'''Node({self.key}: {self.value})'''
@property
def __magic_name__ ( self : str ):
return len(self.forward )
class __UpperCAmelCase ( Generic[KT, VT] ):
def __init__( self : str, __A : float = 0.5, __A : int = 1_6 ):
UpperCAmelCase : Node[KT, VT] = Node[KT, VT]()
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : Optional[int] = p
UpperCAmelCase : List[Any] = max_level
def __str__( self : Optional[Any] ):
UpperCAmelCase : str = list(self )
if len(__A ) == 0:
return F'''SkipList(level={self.level})'''
UpperCAmelCase : List[str] = max((len(str(__A ) ) for item in items), default=4 )
UpperCAmelCase : Any = max(__A, 4 ) + 4
UpperCAmelCase : Tuple = self.head
UpperCAmelCase : Dict = []
UpperCAmelCase : Optional[int] = node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(__A, '''-''' ) + '''* ''' * len(__A ) )
lines.append(''' ''' * label_size + '''| ''' * len(__A ) )
while len(node.forward ) != 0:
UpperCAmelCase : Dict = node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(__A, '''-''' )
+ ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
lines.append(''' ''' * label_size + '''| ''' * len(__A ) )
UpperCAmelCase : List[Any] = node.forward
lines.append('''None'''.ljust(__A ) + '''* ''' * len(__A ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(__A )
def __iter__( self : Dict ):
UpperCAmelCase : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
UpperCAmelCase : Union[str, Any] = node.forward[0]
def __magic_name__ ( self : str ):
UpperCAmelCase : str = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def __magic_name__ ( self : Union[str, Any], __A : List[str] ):
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Optional[Any] = self.head
for i in reversed(range(self.level ) ):
            # i < node.level - When the node's level is less than `i`, decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
UpperCAmelCase : int = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__A )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __magic_name__ ( self : Tuple, __A : KT ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self._locate_node(__A )
if node is not None:
for i, update_node in enumerate(__A ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
UpperCAmelCase : List[str] = node.forward[i]
else:
UpperCAmelCase : List[Any] = update_node.forward[:i]
def __magic_name__ ( self : int, __A : KT, __A : VT ):
UpperCAmelCase , UpperCAmelCase : List[str] = self._locate_node(__A )
if node is not None:
UpperCAmelCase : Union[str, Any] = value
else:
UpperCAmelCase : List[Any] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1, __A ):
update_vector.append(self.head )
UpperCAmelCase : List[Any] = level
UpperCAmelCase : List[Any] = Node(__A, __A )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__A )
else:
UpperCAmelCase : List[Any] = new_node
def __magic_name__ ( self : Tuple, __A : VT ):
UpperCAmelCase , UpperCAmelCase : str = self._locate_node(__A )
if node is not None:
return node.value
return None
def a__ ( ) -> List[Any]:
UpperCAmelCase : List[Any] = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
UpperCAmelCase : Optional[Any] = skip_list.head
UpperCAmelCase : Tuple = {}
while node.level != 0:
UpperCAmelCase : Any = node.forward[0]
UpperCAmelCase : Tuple = node.value
assert len(UpperCAmelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def a__ ( ) -> Dict:
UpperCAmelCase : List[str] = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
UpperCAmelCase : Optional[Any] = skip_list.head
UpperCAmelCase : List[str] = {}
while node.level != 0:
UpperCAmelCase : Union[str, Any] = node.forward[0]
UpperCAmelCase : int = node.value
if len(UpperCAmelCase ) != 4:
print()
assert len(UpperCAmelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def a__ ( ) -> Any:
UpperCAmelCase : int = SkipList()
assert skip_list.find('''Some key''' ) is None
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : Dict = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def a__ ( ) -> List[Any]:
UpperCAmelCase : Any = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def a__ ( ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def a__ ( ) -> str:
UpperCAmelCase : Optional[int] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(UpperCAmelCase : Dict ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(UpperCAmelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def a__ ( ) -> List[str]:
def is_sorted(UpperCAmelCase : Optional[Any] ):
return all(next_item >= item for item, next_item in zip(UpperCAmelCase , lst[1:] ) )
UpperCAmelCase : Any = SkipList()
for i in range(10 ):
skip_list.insert(UpperCAmelCase , UpperCAmelCase )
assert is_sorted(list(UpperCAmelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(UpperCAmelCase ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(UpperCAmelCase ) )
def a__ ( ) -> Any:
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def a__ ( ) -> Dict:
UpperCAmelCase : List[Any] = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
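# With promotion probability p, random_level() above returns level k with
# probability p ** (k - 1) * (1 - p) (ignoring the max_level cap), so the
# expected level is 1 / (1 - p) -- 2.0 for the default p = 0.5. A quick
# empirical check of that expectation:
def _sample_level(p: float = 0.5, max_level: int = 16) -> int:
    level = 1
    while random() < p and level < max_level:
        level += 1
    return level

_mean_level = sum(_sample_level() for _ in range(100_000)) / 100_000
assert 1.9 < _mean_level < 2.1  # close to the theoretical 1 / (1 - 0.5) = 2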
| 99 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
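# The _LazyModule indirection above defers the heavy framework imports until an
# attribute is first touched. A minimal sketch of the same idea using the
# module-level __getattr__ hook from PEP 562 (the attribute map is illustrative):
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])  # imported on first access
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")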
| 99 | 1 |
import math
def solution(n: int = 100) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
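# Worked check: for n = 10 the sum of squares is 385, the square of the sum is
# 55 ** 2 = 3025, and the difference is 3025 - 385 = 2640.
assert solution(10) == 2640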
if __name__ == "__main__":
print(F'''{solution() = }''')
| 107 |
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 304 | 0 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float):
    result = namedtuple('result' , 'name value')
if (voltage, current, power).count(0) != 1:
raise ValueError('Only one argument must be 0')
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system')
elif voltage == 0:
return result('voltage' , power / current)
elif current == 0:
return result('current' , power / voltage)
elif power == 0:
return result('power' , float(round(abs(voltage * current) , 2)))
else:
raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
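# Usage sketch for electric_power() above: exactly one argument is 0 and the
# missing quantity is solved from P = V * I (namedtuples compare equal to
# plain tuples, which keeps the checks terse).
assert electric_power(voltage=0, current=2, power=4) == ("voltage", 2.0)
assert electric_power(voltage=2, current=2, power=0) == ("power", 4.0)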
| 327 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 327 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowercase__ ={
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
lowercase__ ={
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
lowercase__ ={
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
lowercase__ ={
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
lowercase__ ={
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
lowercase__ ={
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('''boolean value expected''' )
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict=False ):
__a : str = checkpoint[f"{old_prefix}.in_layers.0.weight"]
__a : Optional[Any] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
__a : Any = checkpoint[f"{old_prefix}.in_layers.2.weight"]
__a : List[Any] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
__a : List[str] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
__a : Any = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
__a : Optional[int] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
__a : Union[str, Any] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
__a : Dict = checkpoint[f"{old_prefix}.out_layers.3.weight"]
__a : Union[str, Any] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
if has_skip:
__a : Tuple = checkpoint[f"{old_prefix}.skip_connection.weight"]
__a : Union[str, Any] = checkpoint[f"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def __UpperCamelCase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : int=None ):
__a , __a , __a : str = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
__a , __a , __a : Any = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
__a : List[Any] = checkpoint[f"{old_prefix}.norm.weight"]
__a : List[str] = checkpoint[f"{old_prefix}.norm.bias"]
__a : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 )
__a : Any = bias_q.squeeze(-1 ).squeeze(-1 )
__a : int = weight_k.squeeze(-1 ).squeeze(-1 )
__a : Any = bias_k.squeeze(-1 ).squeeze(-1 )
__a : int = weight_v.squeeze(-1 ).squeeze(-1 )
__a : Optional[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
__a : Optional[int] = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
__a : Optional[Any] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
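# convert_attention() above splits a fused qkv projection with .chunk(3, dim=0)
# and drops the trailing 1x1 convolution dimensions with .squeeze(-1). A
# self-contained illustration of those two tensor operations:
_qkv_demo = torch.randn(3 * 8, 16, 1, 1)  # fused conv weight: (3 * channels, in_channels, 1, 1)
_q_demo, _k_demo, _v_demo = _qkv_demo.chunk(3, dim=0)
assert _q_demo.shape == (8, 16, 1, 1)
assert _q_demo.squeeze(-1).squeeze(-1).shape == (8, 16)  # linear-style weight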
def __UpperCamelCase ( lowerCAmelCase__ : str , lowerCAmelCase__ : int ):
__a : Any = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__a : Tuple = {}
__a : Union[str, Any] = checkpoint['''time_embed.0.weight''']
__a : Union[str, Any] = checkpoint['''time_embed.0.bias''']
__a : List[Any] = checkpoint['''time_embed.2.weight''']
__a : Any = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
__a : List[str] = checkpoint['''label_emb.weight''']
__a : Any = checkpoint['''input_blocks.0.0.weight''']
__a : int = checkpoint['''input_blocks.0.0.bias''']
__a : Tuple = unet_config['''down_block_types''']
__a : List[Any] = unet_config['''layers_per_block''']
__a : Union[str, Any] = unet_config['''attention_head_dim''']
__a : Tuple = unet_config['''block_out_channels''']
__a : List[Any] = 1
__a : Any = channels_list[0]
for i, layer_type in enumerate(lowerCAmelCase__ ):
__a : List[str] = channels_list[i]
__a : Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowerCAmelCase__ ):
__a : List[Any] = f"down_blocks.{i}.resnets.{j}"
__a : Dict = f"input_blocks.{current_layer}.0"
__a : List[str] = True if j == 0 and downsample_block_has_skip else False
__a : Optional[Any] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_skip=lowerCAmelCase__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowerCAmelCase__ ):
__a : int = f"down_blocks.{i}.resnets.{j}"
__a : Tuple = f"input_blocks.{current_layer}.0"
__a : Optional[int] = True if j == 0 and downsample_block_has_skip else False
__a : List[Any] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_skip=lowerCAmelCase__ )
__a : int = f"down_blocks.{i}.attentions.{j}"
__a : int = f"input_blocks.{current_layer}.1"
__a : Union[str, Any] = convert_attention(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
current_layer += 1
if i != len(lowerCAmelCase__ ) - 1:
__a : List[str] = f"down_blocks.{i}.downsamplers.0"
__a : List[str] = f"input_blocks.{current_layer}.0"
__a : Any = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
current_layer += 1
__a : List[Any] = current_channels
# hardcoded the mid-block for now
__a : Tuple = '''mid_block.resnets.0'''
__a : Tuple = '''middle_block.0'''
__a : Optional[Any] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a : str = '''mid_block.attentions.0'''
__a : str = '''middle_block.1'''
__a : List[str] = convert_attention(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a : int = '''mid_block.resnets.1'''
__a : int = '''middle_block.2'''
__a : Union[str, Any] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a : int = 0
__a : Optional[Any] = unet_config['''up_block_types''']
for i, layer_type in enumerate(lowerCAmelCase__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__a : Any = f"up_blocks.{i}.resnets.{j}"
__a : Optional[int] = f"output_blocks.{current_layer}.0"
__a : Tuple = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_skip=lowerCAmelCase__ )
current_layer += 1
if i != len(lowerCAmelCase__ ) - 1:
__a : List[Any] = f"up_blocks.{i}.upsamplers.0"
__a : Optional[Any] = f"output_blocks.{current_layer-1}.1"
__a : Tuple = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__a : List[str] = f"up_blocks.{i}.resnets.{j}"
__a : Dict = f"output_blocks.{current_layer}.0"
__a : Tuple = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , has_skip=lowerCAmelCase__ )
__a : Optional[Any] = f"up_blocks.{i}.attentions.{j}"
__a : Union[str, Any] = f"output_blocks.{current_layer}.1"
__a : Dict = convert_attention(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
current_layer += 1
if i != len(lowerCAmelCase__ ) - 1:
__a : Optional[int] = f"up_blocks.{i}.upsamplers.0"
__a : int = f"output_blocks.{current_layer-1}.2"
__a : Union[str, Any] = convert_resnet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a : Any = checkpoint['''out.0.weight''']
__a : str = checkpoint['''out.0.bias''']
__a : Any = checkpoint['''out.2.weight''']
__a : Optional[int] = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
lowercase__ =parser.parse_args()
lowercase__ =strabool(args.class_cond)
lowercase__ =os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
lowercase__ =IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase__ =LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowercase__ =TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
lowercase__ =None
lowercase__ =con_pt_to_diffuser(args.unet_path, unet_config)
lowercase__ =UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowercase__ =CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowercase__ =CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase__ =CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
lowercase__ =CMStochasticIterativeScheduler(**scheduler_config)
lowercase__ =ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 216 |
from __future__ import annotations
from typing import Any
class UpperCamelCase__ :
def __init__(self : Union[str, Any] , snake_case_ : int ):
__a : Dict = num_of_nodes
__a : list[list[int]] = []
__a : dict[int, int] = {}
def lowerCAmelCase (self : Optional[Any] , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowerCAmelCase (self : Any , snake_case_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCAmelCase (self : str , snake_case_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
__a : Optional[int] = self.find_component(snake_case_ )
def lowerCAmelCase (self : Any , snake_case_ : list[int] , snake_case_ : int , snake_case_ : int ):
if component_size[u_node] <= component_size[v_node]:
__a : List[str] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case_ )
elif component_size[u_node] >= component_size[v_node]:
__a : Optional[int] = self.find_component(snake_case_ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case_ )
def lowerCAmelCase (self : Optional[Any] ):
__a : str = []
__a : int = 0
__a : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__a : Union[str, Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__a , __a , __a : Optional[Any] = edge
__a : List[str] = self.m_component[u]
__a : List[Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__a : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case_ , snake_case_ ):
__a , __a , __a : str = edge
__a : Any = self.m_component[u]
__a : Any = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case_ , snake_case_ , snake_case_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__a : Optional[int] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def __UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
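# The class above is Boruvka's MST algorithm: each round, every component picks
# its cheapest outgoing edge, those edges are all merged in, and the number of
# components at least halves, giving O(E log V) overall. A compact runnable
# restatement (assumes a connected graph):
def boruvka_mst_weight(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]
            u = parent[u]
        return u

    total_weight, components = 0, num_nodes
    while components > 1:
        cheapest = [None] * num_nodes
        for u, v, w in edges:
            root_u, root_v = find(u), find(v)
            if root_u != root_v:
                for root in (root_u, root_v):
                    if cheapest[root] is None or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is not None:
                u, v, w = edge
                root_u, root_v = find(u), find(v)
                if root_u != root_v:
                    parent[root_u] = root_v
                    total_weight += w
                    components -= 1
    return total_weight

# 4-cycle with weights 1, 2, 3, 4: the MST drops the heaviest edge, so 1 + 2 + 3 = 6.
assert boruvka_mst_weight(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]) == 6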
| 216 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 239 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : List[Any] = '''levit'''
def __init__( self ,SCREAMING_SNAKE_CASE__=2_24 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=[1_28, 2_56, 3_84] ,SCREAMING_SNAKE_CASE__=[4, 8, 12] ,SCREAMING_SNAKE_CASE__=[4, 4, 4] ,SCREAMING_SNAKE_CASE__=[16, 16, 16] ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=[2, 2, 2] ,SCREAMING_SNAKE_CASE__=[2, 2, 2] ,SCREAMING_SNAKE_CASE__=0.0_2 ,**SCREAMING_SNAKE_CASE__ ,) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = image_size
__SCREAMING_SNAKE_CASE :Dict = num_channels
__SCREAMING_SNAKE_CASE :Optional[int] = kernel_size
__SCREAMING_SNAKE_CASE :Union[str, Any] = stride
__SCREAMING_SNAKE_CASE :List[Any] = padding
__SCREAMING_SNAKE_CASE :Tuple = hidden_sizes
__SCREAMING_SNAKE_CASE :List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE :Optional[int] = depths
__SCREAMING_SNAKE_CASE :Optional[Any] = key_dim
__SCREAMING_SNAKE_CASE :Optional[Any] = drop_path_rate
__SCREAMING_SNAKE_CASE :Tuple = patch_size
__SCREAMING_SNAKE_CASE :int = attention_ratio
__SCREAMING_SNAKE_CASE :List[Any] = mlp_ratio
__SCREAMING_SNAKE_CASE :str = initializer_range
__SCREAMING_SNAKE_CASE :Optional[Any] = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : int = version.parse('''1.11''' )
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _UpperCamelCase ( self ) -> float:
"""simple docstring"""
        return 1E-4
| 239 | 1 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 4 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = BlenderbotTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
lowerCAmelCase = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to add a space prefix, as is done within Blenderbot.
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
        return input_ids
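# The truncation step above is just a tail slice that keeps the most recent
# context; a quick standalone sanity check of that behaviour:
def _truncate_to_max_length(input_ids, model_max_length):
    if len(input_ids) > model_max_length:
        return input_ids[-model_max_length:]  # keep the newest tokens
    return input_ids

assert _truncate_to_max_length([1, 2, 3, 4, 5], 3) == [3, 4, 5]
assert _truncate_to_max_length([1, 2], 3) == [1, 2]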
| 4 | 1 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 366 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def __lowerCamelCase ( self :List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] ,)
def __lowerCamelCase ( self :Tuple ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def __lowerCamelCase ( self :List[str] ,__lowercase :Optional[int] ,__lowercase :int ,__lowercase :Any=None ,__lowercase :List[str]="uniform_average" ,__lowercase :List[Any]=True ):
snake_case__ : Union[str, Any] = mean_squared_error(
__lowercase ,__lowercase ,sample_weight=__lowercase ,multioutput=__lowercase ,squared=__lowercase )
return {"mse": mse}
| 44 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowercase_ : List[Any] = TypeVar('KT')
lowercase_ : Tuple = TypeVar('VT')
class __lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self : str , snake_case__ : KT | str = "root" , snake_case__ : VT | None = None ):
"""simple docstring"""
_UpperCAmelCase = key
_UpperCAmelCase = value
_UpperCAmelCase = []
def __repr__( self : Optional[int] ):
"""simple docstring"""
return F"""Node({self.key}: {self.value})"""
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return len(self.forward )
class __lowerCAmelCase ( Generic[KT, VT] ):
def __init__( self : Union[str, Any] , snake_case__ : float = 0.5 , snake_case__ : int = 16 ):
"""simple docstring"""
_UpperCAmelCase = Node[KT, VT]()
_UpperCAmelCase = 0
_UpperCAmelCase = p
_UpperCAmelCase = max_level
def __str__( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = list(self )
if len(snake_case__ ) == 0:
return F"""SkipList(level={self.level})"""
_UpperCAmelCase = max((len(str(snake_case__ ) ) for item in items) , default=4 )
_UpperCAmelCase = max(snake_case__ , 4 ) + 4
_UpperCAmelCase = self.head
_UpperCAmelCase = []
_UpperCAmelCase = node.forward.copy()
lines.append(F"""[{node.key}]""".ljust(snake_case__ , "-" ) + "* " * len(snake_case__ ) )
lines.append(" " * label_size + "| " * len(snake_case__ ) )
while len(node.forward ) != 0:
_UpperCAmelCase = node.forward[0]
lines.append(
F"""[{node.key}]""".ljust(snake_case__ , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(snake_case__ ) )
_UpperCAmelCase = node.forward
lines.append("None".ljust(snake_case__ ) + "* " * len(snake_case__ ) )
return F"""SkipList(level={self.level})\n""" + "\n".join(snake_case__ )
def __iter__( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
_UpperCAmelCase = node.forward[0]
def UpperCamelCase ( self : str ):
"""simple docstring"""
_UpperCAmelCase = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = self.head
for i in reversed(range(self.level ) ):
            # i < node.level - when the node's level is less than `i`, decrement `i`.
            # node.forward[i].key < key - jumping to a node whose key is greater
            #                             than or equal to the searched key would
            #                             skip over the searched key.
while i < node.level and node.forward[i].key < key:
_UpperCAmelCase = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(snake_case__ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : KT ):
"""simple docstring"""
_UpperCAmelCase = self._locate_node(snake_case__ )
if node is not None:
for i, update_node in enumerate(snake_case__ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_UpperCAmelCase = node.forward[i]
else:
_UpperCAmelCase = update_node.forward[:i]
def UpperCamelCase ( self : Tuple , snake_case__ : KT , snake_case__ : VT ):
"""simple docstring"""
_UpperCAmelCase = self._locate_node(snake_case__ )
if node is not None:
_UpperCAmelCase = value
else:
_UpperCAmelCase = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , snake_case__ ):
update_vector.append(self.head )
_UpperCAmelCase = level
_UpperCAmelCase = Node(snake_case__ , snake_case__ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(snake_case__ )
else:
_UpperCAmelCase = new_node
def UpperCamelCase ( self : Optional[int] , snake_case__ : VT ):
"""simple docstring"""
_UpperCAmelCase = self._locate_node(snake_case__ )
if node is not None:
return node.value
return None
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 12 )
skip_list.insert("Key3" , 41 )
skip_list.insert("Key4" , -19 )
_UpperCAmelCase = skip_list.head
_UpperCAmelCase = {}
while node.level != 0:
_UpperCAmelCase = node.forward[0]
_UpperCAmelCase = node.value
assert len(snake_case_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 10 )
skip_list.insert("Key1" , 12 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 10 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 10 )
_UpperCAmelCase = skip_list.head
_UpperCAmelCase = {}
while node.level != 0:
_UpperCAmelCase = node.forward[0]
_UpperCAmelCase = node.value
if len(snake_case_ ) != 4:
print()
assert len(snake_case_ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
assert skip_list.find("Some key" ) is None
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert("Key2" , 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key" , 10 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 142 )
skip_list.insert("Key2" , 15 )
skip_list.delete("X" )
def traverse_keys(snake_case_ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(snake_case_ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
def is_sorted(snake_case_ ):
return all(next_item >= item for item, next_item in zip(snake_case_ , lst[1:] ) )
_UpperCAmelCase = SkipList()
for i in range(10 ):
skip_list.insert(snake_case_ , snake_case_ )
assert is_sorted(list(snake_case_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(snake_case_ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(snake_case_ ) )
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = SkipList()
skip_list.insert(2 , "2" )
skip_list.insert(4 , "4" )
skip_list.insert(6 , "4" )
skip_list.insert(4 , "5" )
skip_list.insert(8 , "4" )
skip_list.insert(9 , "4" )
skip_list.delete(4 )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
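# Illustrative, self-contained sketch (not part of the original file) of the
# geometric level draw used by the skip list above: with p = 0.5, roughly half
# of all nodes reach each successive level, which is what keeps the expected
# search cost logarithmic. Reuses ``random`` imported at the top of this file.
def _demo_random_level(p=0.5, max_level=16):
    level = 1
    while random() < p and level < max_level:
        level += 1
    return level
_demo_levels = [_demo_random_level() for _ in range(10_000)]
print(sum(lvl >= 2 for lvl in _demo_levels) / len(_demo_levels))  # ~0.5 for p = 0.5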
| 133 |
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
SCREAMING_SNAKE_CASE_: Optional[int] =str(bin(lowercase ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE_: Any =str(bin(lowercase ) )[2:]
SCREAMING_SNAKE_CASE_: Dict =max(len(lowercase ) , len(lowercase ) )
return "0b" + "".join(
str(int("""1""" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase ) , b_binary.zfill(lowercase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
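# Sanity sketch (illustrative, not part of the original file): for non-negative
# inputs the string-based routine above should agree with Python's built-in
# bitwise OR.
def _demo_or(a, b):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be non-negative")
    return bin(a | b)
assert _demo_or(25, 32) == "0b111001"
assert _demo_or(37, 50) == "0b110111"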
| 173 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class a_ ( a_ ):
'''simple docstring'''
__a: List[Any] = '''yolos'''
def __init__( self , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=[5_1_2, 8_6_4] , lowercase_=1_6 , lowercase_=3 , lowercase_=True , lowercase_=1_0_0 , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> int:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = num_detection_tokens
lowerCAmelCase_ = use_mid_position_embeddings
lowerCAmelCase_ = auxiliary_loss
# Hungarian matcher
lowerCAmelCase_ = class_cost
lowerCAmelCase_ = bbox_cost
lowerCAmelCase_ = giou_cost
# Loss coefficients
lowerCAmelCase_ = bbox_loss_coefficient
lowerCAmelCase_ = giou_loss_coefficient
lowerCAmelCase_ = eos_coefficient
class a_ ( a_ ):
'''simple docstring'''
__a: Tuple = version.parse('''1.11''' )
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowercase ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def _lowercase ( self ) -> int:
'''simple docstring'''
return 1_2
| 14 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class a_ :
'''simple docstring'''
__a: int
__a: int
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = [[] for _ in range(lowercase_ )]
lowerCAmelCase_ = size
def __getitem__( self , lowercase_ ) -> Iterator[Edge]:
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
return self._size
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(lowercase_ , lowercase_ ) )
def _lowercase ( self , lowercase_ , lowercase_ ) -> int | None:
'''simple docstring'''
lowerCAmelCase_ = deque([start_vertex] )
lowerCAmelCase_ = [None] * self.size
lowerCAmelCase_ = 0
while queue:
lowerCAmelCase_ = queue.popleft()
lowerCAmelCase_ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCAmelCase_ = current_distance + edge.weight
lowerCAmelCase_ = distances[edge.destination_vertex]
if (
isinstance(lowercase_ , lowercase_ )
and new_distance >= dest_vertex_distance
):
continue
lowerCAmelCase_ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
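# Self-contained sketch (illustrative, not part of the original file) of the
# 0-1 BFS idea used above: 0-weight edges go to the front of the deque and
# 1-weight edges to the back, so vertices are popped in nondecreasing distance
# order. The tiny adjacency list below is a hypothetical example.
def _demo_zero_one_bfs(adj, start):
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[v] is None or dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist
assert _demo_zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0) == [0, 0, 1]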
| 14 | 1 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=2 , snake_case=8 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=1_6 , snake_case=5 , snake_case=2 , snake_case=3_6 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : Dict = parent
UpperCAmelCase : int = batch_size
UpperCAmelCase : int = seq_length
UpperCAmelCase : str = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[Any] = use_token_type_ids
UpperCAmelCase : List[Any] = use_labels
UpperCAmelCase : List[str] = vocab_size
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : int = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : str = num_labels
UpperCAmelCase : Union[str, Any] = num_choices
UpperCAmelCase : List[Any] = scope
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_input_mask:
UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : List[str] = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Optional[Any] = None
if self.use_labels:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.get_config()
UpperCAmelCase : Dict = 3_0_0
return config
def A_ ( self ):
'''simple docstring'''
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = MraModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
UpperCAmelCase : Any = model(snake_case , token_type_ids=snake_case )
UpperCAmelCase : Optional[Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Dict = True
UpperCAmelCase : Any = MraModel(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
UpperCAmelCase : List[str] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , )
UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = MraForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Any = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = MraForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[str] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.num_labels
UpperCAmelCase : Any = MraForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Dict = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : int = self.num_labels
UpperCAmelCase : List[Any] = MraForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : List[str] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : int = self.num_choices
UpperCAmelCase : Dict = MraForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : List[Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : List[Any] = config_and_inputs
UpperCAmelCase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Optional[int] = ()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = MraModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : Optional[int] = type
self.model_tester.create_and_check_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def A_ ( self ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[Any] = MraModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@unittest.skip(reason="MRA does not output attentions" )
def A_ ( self ):
'''simple docstring'''
return
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase : List[Any] = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase : List[Any] = model(snake_case )[0]
UpperCAmelCase : Dict = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , snake_case )
UpperCAmelCase : int = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
UpperCAmelCase : Any = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase : Tuple = model(snake_case )[0]
UpperCAmelCase : Union[str, Any] = 5_0_2_6_5
UpperCAmelCase : List[Any] = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , snake_case )
UpperCAmelCase : Tuple = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
UpperCAmelCase : Optional[Any] = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCAmelCase : List[Any] = model(snake_case )[0]
UpperCAmelCase : Dict = 5_0_2_6_5
UpperCAmelCase : List[str] = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , snake_case )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
| 311 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) != 32:
raise ValueError("Input must be of length 32" )
UpperCAmelCase : Union[str, Any] = b""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Dict = format(__magic_name__ , "08x" )[-8:]
UpperCAmelCase : List[str] = b""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : int = b""
for char in message:
bit_string += format(__magic_name__ , "08b" ).encode("utf-8" )
UpperCAmelCase : List[Any] = format(len(__magic_name__ ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__magic_name__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def lowercase ( __magic_name__ ):
'''simple docstring'''
if len(__magic_name__ ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__magic_name__ ) , 512 ):
UpperCAmelCase : Union[str, Any] = bit_string[pos : pos + 512]
UpperCAmelCase : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def lowercase ( __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
UpperCAmelCase : Any = format(__magic_name__ , "032b" )
UpperCAmelCase : int = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__magic_name__ , 2 )
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
return (a + b) % 2**32
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = preprocess(__magic_name__ )
UpperCAmelCase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
UpperCAmelCase : List[str] = 0X67452301
UpperCAmelCase : Tuple = 0XEFCDAB89
UpperCAmelCase : List[Any] = 0X98BADCFE
UpperCAmelCase : List[str] = 0X10325476
UpperCAmelCase : Dict = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__magic_name__ ):
UpperCAmelCase : Optional[Any] = aa
UpperCAmelCase : List[Any] = ba
UpperCAmelCase : Optional[Any] = ca
UpperCAmelCase : Any = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
UpperCAmelCase : Tuple = d ^ (b & (c ^ d))
UpperCAmelCase : List[str] = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
UpperCAmelCase : int = c ^ (d & (b ^ c))
UpperCAmelCase : Tuple = (5 * i + 1) % 16
elif i <= 47:
UpperCAmelCase : Any = b ^ c ^ d
UpperCAmelCase : Union[str, Any] = (3 * i + 5) % 16
else:
UpperCAmelCase : Dict = c ^ (b | not_aa(__magic_name__ ))
UpperCAmelCase : Dict = (7 * i) % 16
UpperCAmelCase : List[str] = (f + a + added_consts[i] + block_words[g]) % 2**32
UpperCAmelCase : List[Any] = d
UpperCAmelCase : Any = c
UpperCAmelCase : Dict = b
UpperCAmelCase : Union[str, Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) )
# Add hashed chunk to running total
UpperCAmelCase : List[str] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Any = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[Any] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : Optional[int] = sum_aa(__magic_name__ , __magic_name__ )
UpperCAmelCase : List[str] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
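# Cross-check sketch (illustrative, not part of the original file): a
# from-scratch MD5 such as the one above can be validated against the standard
# library on well-known test vectors.
import hashlib
assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"
assert hashlib.md5(b"abc").hexdigest() == "900150983cd24fb0d6963f7d28e17f72"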
| 311 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase : int = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
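# Minimal sketch of the lazy-module pattern used above (illustrative; the real
# transformers ``_LazyModule`` is more involved). Attribute access is what
# triggers the actual submodule import, so importing the package stays cheap.
import importlib
import types
class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, item):
        module = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(module, item)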
| 148 |
import os
def _A ( ):
"""simple docstring"""
a__ : Optional[int] =os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE ) , "num.txt" )
with open(SCREAMING_SNAKE_CASE ) as file_hand:
return str(sum(int(SCREAMING_SNAKE_CASE ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 148 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class a__ ( snake_case__ ):
_a : List[str] = """marian"""
_a : Optional[int] = ["""past_key_values"""]
_a : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , _A=5_8_1_0_1 , _A=None , _A=1_0_2_4 , _A=1_2 , _A=4_0_9_6 , _A=1_6 , _A=1_2 , _A=4_0_9_6 , _A=1_6 , _A=0.0 , _A=0.0 , _A=True , _A=True , _A="gelu" , _A=1_0_2_4 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.02 , _A=5_8_1_0_0 , _A=False , _A=5_8_1_0_0 , _A=0 , _A=0 , _A=True , **_A , ):
"""simple docstring"""
__lowerCAmelCase = vocab_size
__lowerCAmelCase = decoder_vocab_size or vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , )
class a__ ( snake_case__ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__lowerCAmelCase = {0: "batch"}
__lowerCAmelCase = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
__lowerCAmelCase = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_A , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
for i in range(_A ):
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
else:
__lowerCAmelCase = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = super().outputs
else:
__lowerCAmelCase = super(_A , self ).outputs
if self.use_past:
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
for i in range(_A ):
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
__lowerCAmelCase = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
"""simple docstring"""
__lowerCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
_A , _A , _A , _A , _A )
# Generate decoder inputs
__lowerCAmelCase = seq_length if not self.use_past else 1
__lowerCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
_A , _A , _A , _A , _A )
__lowerCAmelCase = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__lowerCAmelCase = dict(**_A , **_A )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["input_ids"].shape
__lowerCAmelCase = common_inputs["decoder_input_ids"].shape[1]
__lowerCAmelCase , __lowerCAmelCase = self.num_attention_heads
__lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCAmelCase = decoder_seq_length + 3
__lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCAmelCase = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_A , _A )] , dim=1 )
__lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
__lowerCAmelCase = min(_A , _A )
__lowerCAmelCase = max(_A , _A ) - min_num_layers
__lowerCAmelCase = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
__lowerCAmelCase = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_A , _A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
"""simple docstring"""
__lowerCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
_A , _A , _A , _A , _A )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__lowerCAmelCase = seqlen + 2
__lowerCAmelCase , __lowerCAmelCase = self.num_layers
__lowerCAmelCase , __lowerCAmelCase = self.num_attention_heads
__lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCAmelCase = common_inputs["attention_mask"].dtype
__lowerCAmelCase = torch.cat(
[common_inputs["attention_mask"], torch.ones(_A , _A , dtype=_A )] , dim=1 )
__lowerCAmelCase = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
"""simple docstring"""
__lowerCAmelCase = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCAmelCase = tokenizer.num_special_tokens_to_add(_A )
__lowerCAmelCase = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
__lowerCAmelCase = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCAmelCase = dict(tokenizer(_A , return_tensors=_A ) )
return common_inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A = -1 , _A = -1 , _A = False , _A = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
else:
__lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
return common_inputs
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase = super()._flatten_past_key_values_(_A , _A , _A , _A )
else:
__lowerCAmelCase = super(_A , self )._flatten_past_key_values_(
_A , _A , _A , _A )
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return 1E-4
| 92 |
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = []
def snake_case ( self , __a ):
return self.node_position[vertex]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = pos
def snake_case ( self , __a , __a , __a , __a ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCAmelCase = 2 * start + 1
else:
__lowerCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCAmelCase , __lowerCAmelCase = heap[smallest_child], positions[smallest_child]
__lowerCAmelCase , __lowerCAmelCase = (
heap[start],
positions[start],
)
__lowerCAmelCase , __lowerCAmelCase = temp, tempa
__lowerCAmelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __a )
self.top_to_bottom(__a , __a , __a , __a )
def snake_case ( self , __a , __a , __a , __a ):
__lowerCAmelCase = position[index]
while index != 0:
__lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCAmelCase = heap[parent]
__lowerCAmelCase = position[parent]
self.set_position(position[parent] , __a )
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , __a )
break
__lowerCAmelCase = parent
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , 0 )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = len(__a ) // 2 - 1
for i in range(__a , -1 , -1 ):
self.top_to_bottom(__a , __a , len(__a ) , __a )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = positions[0]
__lowerCAmelCase = sys.maxsize
self.top_to_bottom(__a , 0 , len(__a ) , __a )
return temp
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = Heap()
__lowerCAmelCase = [0] * len(_UpperCamelCase )
__lowerCAmelCase = [-1] * len(_UpperCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCAmelCase = []
for vertex in range(len(_UpperCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_UpperCamelCase )
heap.node_position.append(_UpperCamelCase )
__lowerCAmelCase = []
__lowerCAmelCase = 1
__lowerCAmelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCAmelCase = 0
__lowerCAmelCase = distance
heap.heapify(_UpperCamelCase , _UpperCamelCase )
for _ in range(1 , len(_UpperCamelCase ) ):
__lowerCAmelCase = heap.delete_minimum(_UpperCamelCase , _UpperCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCAmelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_UpperCamelCase )]
):
__lowerCAmelCase = distance
heap.bottom_to_top(
_UpperCamelCase , heap.get_position(_UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A : Optional[Any] = int(input("Enter number of edges: ").strip())
A : Dict = defaultdict(list)
for _ in range(edges_number):
A : str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
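# Self-contained sketch (illustrative, not part of the original file) of the
# same minimum-spanning-tree idea using the standard-library heap; the 3-vertex
# adjacency dict below is a hypothetical example.
import heapq
def _demo_prim(adj, start=0):
    visited, mst = {start}, []
    heap = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(heap)
    while heap and len(visited) < len(adj):
        w, u, v = heapq.heappop(heap)
        if v not in visited:
            visited.add(v)
            mst.append((u, v))
            for nxt, nw in adj[v]:
                if nxt not in visited:
                    heapq.heappush(heap, (nw, v, nxt))
    return mst
assert _demo_prim({0: [(1, 1), (2, 4)], 1: [(0, 1), (2, 2)], 2: [(0, 4), (1, 2)]}) == [(0, 1), (1, 2)]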
| 57 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29 | 1 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE (A ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCamelCase : Union[str, Any] = int(input('Enter number: ').strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 2 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (A ) -> bool:
"""simple docstring"""
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
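# Illustrative check (not part of the original file): comparing ``len(set(x))``
# with ``len(x)`` detects duplicates because a set keeps one copy of each value.
assert len(set([1, 2, 3])) == len([1, 2, 3])  # all unique
assert len(set([1, 2, 2])) != len([1, 2, 2])  # duplicate detected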
| 2 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__UpperCAmelCase :Any = None
__UpperCAmelCase :List[str] = logging.get_logger(__name__)
__UpperCAmelCase :List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase :List[Any] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase :str = {
"camembert-base": 5_1_2,
}
__UpperCAmelCase :Optional[int] = "▁"
class a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Tuple = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE : Optional[Any] = CamembertTokenizer
def __init__( self : Optional[Any] , snake_case : Optional[Any]=None , snake_case : Any=None , snake_case : Any="<s>" , snake_case : str="</s>" , snake_case : str="</s>" , snake_case : str="<s>" , snake_case : Tuple="<unk>" , snake_case : Optional[Any]="<pad>" , snake_case : Dict="<mask>" , snake_case : Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **snake_case : int , ) -> List[str]:
__UpperCAmelCase : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , additional_special_tokens=_A , **_A , )
__UpperCAmelCase : Dict = vocab_file
__UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
def lowerCamelCase__ ( self : Dict , snake_case : str , snake_case : str = None ) -> Optional[Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : Optional[int] = [self.cls_token_id]
__UpperCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : Optional[Any] , snake_case : int = None ) -> str:
__UpperCAmelCase : List[Any] = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ ( self : int , snake_case : Union[str, Any] , snake_case : List[str] = None ) -> str:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCAmelCase : int = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
        return (out_vocab_file,)
| 364 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase :Any = logging.get_logger(__name__)
__UpperCAmelCase :Dict = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowercase : Tuple ):
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase : Any = k.replace(_lowercase , _lowercase )
if k.startswith('''encoder''' ):
__UpperCAmelCase : str = k.replace('''.attn''' , '''.self_attn''' )
__UpperCAmelCase : Any = k.replace('''norm1''' , '''self_attn_layer_norm''' )
__UpperCAmelCase : List[str] = k.replace('''norm2''' , '''final_layer_norm''' )
elif k.startswith('''decoder''' ):
__UpperCAmelCase : int = k.replace('''norm1''' , '''self_attn_layer_norm''' )
__UpperCAmelCase : Union[str, Any] = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
__UpperCAmelCase : List[Any] = k.replace('''norm3''' , '''final_layer_norm''' )
return k
def _a ( _lowercase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : int = [
'''model.encoder.layernorm_embedding.weight''',
'''model.encoder.layernorm_embedding.bias''',
'''model.decoder.layernorm_embedding.weight''',
'''model.decoder.layernorm_embedding.bias''',
]
for k in keys:
__UpperCAmelCase : Any = sd.pop(_lowercase )
__UpperCAmelCase : Optional[int] = k.replace('''layernorm_embedding''' , '''layer_norm''' )
assert new_k not in sd
__UpperCAmelCase : List[str] = v
__UpperCAmelCase :str = ["START"]
@torch.no_grad()
def _a ( _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : Any = torch.load(_lowercase , map_location='''cpu''' )
__UpperCAmelCase : List[str] = model['''model''']
__UpperCAmelCase : Optional[Any] = BlenderbotConfig.from_json_file(_lowercase )
__UpperCAmelCase : Optional[Any] = BlenderbotForConditionalGeneration(_lowercase )
__UpperCAmelCase : Optional[Any] = m.model.state_dict().keys()
__UpperCAmelCase : int = []
__UpperCAmelCase : List[str] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase : int = rename_state_dict_key(_lowercase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase : Union[str, Any] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowercase )
m.model.load_state_dict(_lowercase , strict=_lowercase )
m.half()
m.save_pretrained(_lowercase )
if __name__ == "__main__":
__UpperCAmelCase :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCAmelCase :Tuple = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 240 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipeline_utils import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
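# The forward-compatible spelling suggested by the warning above:
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput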
| 322 |
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder; returns the measurement counts
    of the (sum, carry-out) qubits."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 322 | 1 |
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways `pence` pence can be made from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    assert solution(200) == 73_682
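# Worked example: solution(5) == 4, since 5 pence can be made as
# 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.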
| 25 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)
    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
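# Behaviour exercised above, summarised: with no framework argument,
# FeaturesManager.determine_framework prefers PyTorch ("pt") when both backends
# are installed and falls back to TensorFlow ("tf") only when torch is missing.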
| 25 | 1 |
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
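# Worked example: a 10 kg body moving at 10 m/s carries
# kinetic_energy(10, 10) == 0.5 * 10 * 10 * 10 == 500.0 joules.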
| 75 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a (ksize x ksize) Gabor kernel for the given orientation and scale."""
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
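# Quick sanity check of the kernel builder (no OpenCV needed): an even ksize is
# bumped to the next odd value, so the kernel is always square with odd size,
# e.g. gabor_filter_kernel(4, 8, 0, 10, 0, 0).shape == (5, 5).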
| 194 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        # forwarded to the tokenizer
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        # forwarded to the tokenizer
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
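# A minimal usage sketch (the checkpoint name is illustrative; any BLIP
# checkpoint with this processor layout works the same way):
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photography of", return_tensors="pt")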
| 249 |
'''simple docstring'''
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
    print(f"Total reward: {total_reward}")
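# The collected `rollout` is a plain list of observation arrays; one simple way
# to keep it for later rendering (plain numpy, nothing pipeline-specific):
#   import numpy as np
#   np.save("rollout.npy", np.stack(rollout))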
| 249 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()
@skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()
@skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16" )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A black colored car"
        source_prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A black colored car"
        source_prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
| 64 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
@cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
@cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
@require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
@require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt" )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
@require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 257 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i, k, i, n):
    """Jump ahead in the digit-sum sequence using cached jumps, writing terms as
    a(i) = b * 10**k + c; returns (diff, terms_jumped)."""
    # ds_b - digitsum(b); c - the low k digits as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """Compute terms sequentially until the carry overflows 10**k or term n is reached."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Add `addend` into the little-endian digit array, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence a(1) = 1, a(i + 1) = a(i) + digitsum(a(i))."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""") | 370 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase__ : Any = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Dict = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : Any = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase__ : str = model(torch.tensor(a ) )
# Expected mean on dim = -1
lowerCAmelCase__ : str = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase__ : List[str] = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test")
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowerCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase__ : List[str] = model(torch.tensor(a ) )
lowerCAmelCase__ : int = torch.tensor(
[[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase__ : Optional[int] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , a , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("Model is currently gated")
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase__ : Tuple = 'Simply put, the theory of relativity states that '
lowerCAmelCase__ : Dict = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase__ : Dict = tokenizer.encode(a , return_tensors='pt' )
lowerCAmelCase__ : str = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=a )
# greedy generation outputs
lowerCAmelCase__ : Optional[Any] = model.generate(a , max_new_tokens=64 , top_p=a , temperature=1 , do_sample=a )
lowerCAmelCase__ : Tuple = tokenizer.decode(generated_ids[0] , skip_special_tokens=a )
self.assertEqual(a , a ) | 307 | 0 |
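# The scaling configs exercised in test_model_rope_scaling map onto the
# `rope_scaling` option of LlamaConfig, a dict with "type" in
# {"linear", "dynamic"} and a float "factor", e.g.:
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})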
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel; `sample` holds the processed hidden states."""
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer that attends along the temporal (frame) axis."""
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ])
        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
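# Shape bookkeeping of the forward pass above, shown on plain tensors: frames
# are folded out of the batch so attention runs along the temporal axis.
#   batch_size, num_frames, channel, height, width = 2, 4, 8, 16, 16
#   x = torch.randn(batch_size * num_frames, channel, height, width)
#   x = x[None, :].reshape(batch_size, num_frames, channel, height, width)
#   x = x.permute(0, 2, 1, 3, 4)                # (b, c, f, h, w) for GroupNorm
#   x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)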
| 14 | import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(F"F1: {f1:.2f}")
    logger.info(F"EM: {em:.2f}")
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : List[str] ):
UpperCamelCase_ : Optional[int] = args.k
UpperCamelCase_ : List[Any] = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[str] = [line.strip() for line in open(lowerCamelCase , 'r' ).readlines()]
UpperCamelCase_ : List[str] = 0
for hypo, reference in zip(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_ : List[str] = set(hypo.split('\t' )[:k] )
UpperCamelCase_ : int = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase_ : Union[str, Any] = 1_0_0.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Any ):
def strip_title(lowerCamelCase : List[str] ):
if title.startswith('"' ):
UpperCamelCase_ : List[str] = title[1:]
if title.endswith('"' ):
UpperCamelCase_ : int = title[:-1]
return title
UpperCamelCase_ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase , truncation=lowerCamelCase , )['input_ids'].to(args.device )
UpperCamelCase_ : int = rag_model.rag.question_encoder(lowerCamelCase )
UpperCamelCase_ : List[str] = question_enc_outputs[0]
UpperCamelCase_ : Tuple = rag_model.retriever(
lowerCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
UpperCamelCase_ : str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase_ : int = []
for docs in all_docs:
UpperCamelCase_ : Union[str, Any] = [strip_title(lowerCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(lowerCamelCase ) )
return provenance_strings
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
with torch.no_grad():
UpperCamelCase_ : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase , return_tensors='pt' , padding=lowerCamelCase , truncation=lowerCamelCase )
UpperCamelCase_ : Union[str, Any] = inputs_dict.input_ids.to(args.device )
UpperCamelCase_ : str = inputs_dict.attention_mask.to(args.device )
UpperCamelCase_ : List[Any] = rag_model.generate( # rag_model overwrites generate
lowerCamelCase , attention_mask=lowerCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase_ : str = rag_model.retriever.generator_tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
if args.print_predictions:
for q, a in zip(lowerCamelCase , lowerCamelCase ):
logger.info('Q: {} - A: {}'.format(lowerCamelCase , lowerCamelCase ) )
return answers
def __lowercase ( ):
UpperCamelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=lowerCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=lowerCamelCase , choices=['exact', 'compressed', 'legacy'] , type=lowerCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=lowerCamelCase , type=lowerCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=lowerCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=lowerCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=lowerCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=lowerCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file. '
'qa - a single line in the following format: question [tab] answer_list. '
'ans - a single line of the gold file contains the expected answer string.'
) , )
parser.add_argument(
'--predictions_path' , type=lowerCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=lowerCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=lowerCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=lowerCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=lowerCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
UpperCamelCase_ : Union[str, Any] = parser.parse_args()
UpperCamelCase_ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
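# Editor's note: a hypothetical command line for this script, using only the flags
# defined above; the checkpoint id and file paths are illustrative placeholders.
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold.tsv \
#       --gold_data_mode qa \
#       --predictions_path predictions.txt \
#       --eval_mode e2e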
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : Any = {}
if args.model_type is None:
UpperCamelCase_ : List[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
UpperCamelCase_ : Optional[int] = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
UpperCamelCase_ : Dict = args.n_docs
if args.index_name is not None:
UpperCamelCase_ : Union[str, Any] = args.index_name
if args.index_path is not None:
UpperCamelCase_ : str = args.index_path
else:
UpperCamelCase_ : Tuple = BartForConditionalGeneration
UpperCamelCase_ : Optional[int] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , lowerCamelCase )
UpperCamelCase_ : Optional[int] = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
UpperCamelCase_ : Optional[int] = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(lowerCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
UpperCamelCase_ : List[str] = RagRetriever.from_pretrained(lowerCamelCase , **lowerCamelCase )
UpperCamelCase_ : List[Any] = model_class.from_pretrained(lowerCamelCase , retriever=lowerCamelCase , **lowerCamelCase )
model.retriever.init_retrieval()
else:
UpperCamelCase_ : Optional[Any] = model_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
UpperCamelCase_ : Optional[Any] = []
for line in tqdm(lowerCamelCase ):
questions.append(line.strip() )
if len(lowerCamelCase ) == args.eval_batch_size:
UpperCamelCase_ : Dict = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write('\n'.join(lowerCamelCase ) + '\n' )
preds_file.flush()
UpperCamelCase_ : Tuple = []
if len(lowerCamelCase ) > 0:
UpperCamelCase_ : Optional[int] = evaluate_batch_fn(lowerCamelCase , lowerCamelCase , lowerCamelCase )
preds_file.write('\n'.join(lowerCamelCase ) )
preds_file.flush()
score_fn(lowerCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
a_ = get_args()
main(args)
| 175 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "deberta-v2"
def __init__( self : Union[str, Any] , _lowerCAmelCase : Dict=128_100 , _lowerCAmelCase : List[str]=1_536 , _lowerCAmelCase : Optional[int]=24 , _lowerCAmelCase : List[str]=24 , _lowerCAmelCase : str=6_144 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Any=512 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Any=1E-7 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : int=-1 , _lowerCAmelCase : Tuple=0 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Optional[Any]="gelu" , **_lowerCAmelCase : int , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = relative_attention
SCREAMING_SNAKE_CASE_ = max_relative_positions
SCREAMING_SNAKE_CASE_ = pad_token_id
SCREAMING_SNAKE_CASE_ = position_biased_input
# Backwards compatibility
if type(_lowerCAmelCase ) == str:
SCREAMING_SNAKE_CASE_ = [x.strip() for x in pos_att_type.lower().split('|' )]
SCREAMING_SNAKE_CASE_ = pos_att_type
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = kwargs.get('pooler_hidden_size' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = pooler_dropout
SCREAMING_SNAKE_CASE_ = pooler_hidden_act
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Any ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 12
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional["TensorType"] = None , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : "PreTrainedTokenizerBase" = None , ):
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(preprocessor=_lowerCAmelCase , framework=_lowerCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs | 210 |
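# Editor's note: a minimal usage sketch for the configuration class above, assuming the
# public `DebertaV2Config` name from transformers; the values mirror the defaults in the
# __init__ signature, so this is illustrative rather than prescriptive.
from transformers import DebertaV2Config

config = DebertaV2Config(hidden_size=1_536, num_hidden_layers=24, num_attention_heads=24)
print(config.hidden_size, config.max_position_embeddings)  # 1536 512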
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : Tuple=10 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Dict=32 * 8 , _lowerCAmelCase : List[str]=32 * 8 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Optional[Any]=64 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = min_size
SCREAMING_SNAKE_CASE_ = max_size
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = hidden_dim
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE_ = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE_ = self.num_queries
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE_ = self.num_channels
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
SCREAMING_SNAKE_CASE_ = self.hidden_dim
return config
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = output.encoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE_ = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(_lowerCAmelCase ) , config.decoder_layers )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = MaskaFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
def comm_check_on_output(_lowerCAmelCase : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(
pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowercase_ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Any ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Any ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE_ = MaskaFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE_ = self.model_tester.get_config()
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase_ ( self : List[str] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
loss.backward()
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.all_model_classes[1]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ : Tuple = 1E-4
def UpperCAmelCase_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[int] ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase_ ( self : int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None ) | 210 | 1 |
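# Editor's note: the slow integration tests above all follow one pattern -- run the
# model without gradients, slice a small corner of an output tensor, and compare it to
# hard-coded expected values within an absolute tolerance. A generic sketch of that
# pattern; the tensors here are made-up stand-ins, not real model outputs.
import torch

TOLERANCE = 1e-4
outputs = torch.tensor([[0.1000, 0.2000, 0.3000], [0.4000, 0.5000, 0.6000]])
expected_slice = torch.tensor([[0.1000, 0.2000], [0.4000, 0.5000]])
assert torch.allclose(outputs[:2, :2], expected_slice, atol=TOLERANCE)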
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
snake_case_ : Union[str, Any] = 'bert-base-cased'
snake_case_ : Any = 'google/pegasus-xsum'
snake_case_ : Tuple = [' Sam ate lunch today.', 'Sams lunch ingredients.']
snake_case_ : List[str] = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
snake_case_ : int = 'patrickvonplaten/t5-tiny-random'
snake_case_ : Union[str, Any] = 'sshleifer/bart-tiny-random'
snake_case_ : Any = 'sshleifer/tiny-mbart'
snake_case_ : Any = 'sshleifer/tiny-marian-en-de'
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Any = '\n'.join(UpperCAmelCase_ )
Path(UpperCAmelCase_ ).open('w' ).writelines(UpperCAmelCase_ )
def A__ ( UpperCAmelCase_ ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(UpperCAmelCase_ , f'{split}.source' ) , UpperCAmelCase_ )
_dump_articles(os.path.join(UpperCAmelCase_ , f'{split}.target' ) , UpperCAmelCase_ )
return tmp_dir
class lowercase__ ( lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] ,)
@slow
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_UpperCamelCase : Optional[Any] = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in ARTICLES )
_UpperCamelCase : str = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in SUMMARIES )
_UpperCamelCase : Dict = 4
_UpperCamelCase : List[Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_UpperCamelCase , _UpperCamelCase : int = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_UpperCamelCase : Optional[Any] = SeqaSeqDataset(
lowerCamelCase__ ,data_dir=lowerCamelCase__ ,type_path='train' ,max_source_length=lowerCamelCase__ ,max_target_length=lowerCamelCase__ ,src_lang=lowerCamelCase__ ,tgt_lang=lowerCamelCase__ ,)
_UpperCamelCase : Union[str, Any] = DataLoader(lowerCamelCase__ ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase__ ,lowerCamelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_UpperCamelCase : Tuple = shift_tokens_right(batch['labels'] ,tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Any ):
'''simple docstring'''
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_UpperCamelCase : Optional[Any] = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in ARTICLES )
_UpperCamelCase : Dict = max(len(tokenizer.encode(lowerCamelCase__ ) ) for a in SUMMARIES )
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[str] = LegacySeqaSeqDataset(
lowerCamelCase__ ,data_dir=lowerCamelCase__ ,type_path='train' ,max_source_length=20 ,max_target_length=lowerCamelCase__ ,)
_UpperCamelCase : Any = DataLoader(lowerCamelCase__ ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : str = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_UpperCamelCase : Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_UpperCamelCase : Dict = tmp_dir.joinpath('train.source' ).open().readlines()
_UpperCamelCase : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase__ ,lowerCamelCase__ ,128 ,lowerCamelCase__ )
_UpperCamelCase : Tuple = {x.name for x in tmp_dir.iterdir()}
_UpperCamelCase : Dict = {x.name for x in save_dir.iterdir()}
_UpperCamelCase : Union[str, Any] = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase__ ) < len(lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
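# Editor's note: the packing verified above greedily joins consecutive source lines
# (newline-separated) while the combined example stays under the token budget, e.g.
# [" Sam ate lunch today.", "Sams lunch ingredients."] becomes the single example
# " Sam ate lunch today.\n Sams lunch ingredients." -- fewer, longer examples with
# the total content preserved, as the length assertions check.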
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason='This test requires fairseq' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Dict = self._get_dataset(max_len=64 )
_UpperCamelCase : Optional[Any] = 64
_UpperCamelCase : str = ds.make_dynamic_sampler(lowerCamelCase__ ,required_batch_size_multiple=lowerCamelCase__ )
_UpperCamelCase : Tuple = [len(lowerCamelCase__ ) for x in batch_sampler]
assert len(set(lowerCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase__ ) == len(lowerCamelCase__ ) # no dropped or added examples
_UpperCamelCase : str = DataLoader(lowerCamelCase__ ,batch_sampler=lowerCamelCase__ ,collate_fn=ds.collate_fn ,num_workers=2 )
_UpperCamelCase : Any = []
_UpperCamelCase : Tuple = []
for batch in data_loader:
_UpperCamelCase : str = batch['input_ids'].shape
_UpperCamelCase : List[Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_UpperCamelCase : Dict = np.product(batch['input_ids'].shape )
num_src_per_batch.append(lowerCamelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase__ )
assert num_src_per_batch[0] == max(lowerCamelCase__ )
if failures:
raise AssertionError(F'too many tokens in {len(lowerCamelCase__ )} batches' )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = self._get_dataset(max_len=512 )
_UpperCamelCase : Dict = 2
_UpperCamelCase : Any = ds.make_sortish_sampler(lowerCamelCase__ ,shuffle=lowerCamelCase__ )
_UpperCamelCase : List[Any] = DataLoader(lowerCamelCase__ ,batch_size=lowerCamelCase__ ,collate_fn=ds.collate_fn ,num_workers=2 )
_UpperCamelCase : List[str] = DataLoader(lowerCamelCase__ ,batch_size=lowerCamelCase__ ,collate_fn=ds.collate_fn ,num_workers=2 ,sampler=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase__ : str ,lowerCamelCase__ : Union[str, Any]="input_ids" ):
return [batch[k].eq(lowerCamelCase__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowerCamelCase__ ,k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase__ ,k='labels' ) )
assert sum(count_pad_tokens(lowerCamelCase__ ) ) < sum(count_pad_tokens(lowerCamelCase__ ) )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : Optional[int]=1000 ,lowerCamelCase__ : Union[str, Any]=128 ):
'''simple docstring'''
if os.getenv('USE_REAL_DATA' ,lowerCamelCase__ ):
_UpperCamelCase : List[Any] = 'examples/seq2seq/wmt_en_ro'
_UpperCamelCase : int = max_len * 2 * 64
if not Path(lowerCamelCase__ ).joinpath('train.len' ).exists():
save_len_file(lowerCamelCase__ ,lowerCamelCase__ )
else:
_UpperCamelCase : Any = 'examples/seq2seq/test_data/wmt_en_ro'
_UpperCamelCase : Any = max_len * 4
save_len_file(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : str = SeqaSeqDataset(
lowerCamelCase__ ,data_dir=lowerCamelCase__ ,type_path='train' ,max_source_length=lowerCamelCase__ ,max_target_length=lowerCamelCase__ ,n_obs=lowerCamelCase__ ,)
return ds, max_tokens, tokenizer
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = self._get_dataset()
_UpperCamelCase : Any = set(DistributedSortishSampler(lowerCamelCase__ ,256 ,num_replicas=2 ,rank=0 ,add_extra_examples=lowerCamelCase__ ) )
_UpperCamelCase : List[Any] = set(DistributedSortishSampler(lowerCamelCase__ ,256 ,num_replicas=2 ,rank=1 ,add_extra_examples=lowerCamelCase__ ) )
assert idsa.intersection(lowerCamelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] ,)
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ ,use_fast=lowerCamelCase__ )
if tok_name == MBART_TINY:
_UpperCamelCase : Union[str, Any] = SeqaSeqDataset(
lowerCamelCase__ ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path='train' ,max_source_length=4 ,max_target_length=8 ,src_lang='EN' ,tgt_lang='FR' ,)
_UpperCamelCase : Tuple = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_UpperCamelCase : Tuple = SeqaSeqDataset(
lowerCamelCase__ ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path='train' ,max_source_length=4 ,max_target_length=8 ,)
_UpperCamelCase : Any = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowerCamelCase__ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase__ ) == 0
| 83 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
snake_case_ : str = logging.getLogger(__name__)
def A__ ( ):
_UpperCamelCase : List[Any] = argparse.ArgumentParser(
description='Preprocess the data once (tokenization + token_to_ids) to avoid re-doing it on every run.' )
parser.add_argument('--file_path' , type=UpperCAmelCase_ , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=UpperCAmelCase_ , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=UpperCAmelCase_ , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=UpperCAmelCase_ , default='data/dump' , help='The dump file prefix.' )
_UpperCamelCase : Any = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Optional[int] = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
_UpperCamelCase : Dict = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
_UpperCamelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Any = tokenizer.special_tokens_map['cls_token'] # `<s>`
_UpperCamelCase : int = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
_UpperCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Optional[Any] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
_UpperCamelCase : Any = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
_UpperCamelCase : List[Any] = fp.readlines()
logger.info('Start encoding' )
logger.info(f'{len(UpperCAmelCase_ )} examples to process.' )
_UpperCamelCase : int = []
_UpperCamelCase : Any = 0
_UpperCamelCase : Any = 1_0_0_0_0
_UpperCamelCase : Optional[Any] = time.time()
for text in data:
_UpperCamelCase : List[Any] = f'{bos} {text.strip()} {sep}'
_UpperCamelCase : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
rslt.append(UpperCAmelCase_ )
iter += 1
if iter % interval == 0:
_UpperCamelCase : Union[str, Any] = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
_UpperCamelCase : Tuple = time.time()
logger.info('Finished binarization' )
logger.info(f'{len(UpperCAmelCase_ )} examples processed.' )
_UpperCamelCase : Optional[int] = f'{args.dump_file}.{args.tokenizer_name}.pickle'
_UpperCamelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 1_6):
_UpperCamelCase : List[Any] = [np.uintaa(UpperCAmelCase_ ) for d in rslt]
else:
_UpperCamelCase : Any = [np.intaa(UpperCAmelCase_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(UpperCAmelCase_ , 'wb' ) as handle:
pickle.dump(rslt_ , UpperCAmelCase_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
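# Editor's note: illustrative check of the dtype branch above -- token ids are stored
# in the smallest integer dtype that can hold the vocabulary (16-bit when vocab_size
# fits below 2**16, 32-bit otherwise). The vocab size here is an assumption.
import numpy as np

vocab_size = 30_522  # fits in 16 bits since 30_522 < 1 << 16
dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
print(np.array([101, 2_023, 2_003, 102], dtype=dtype).dtype)  # uint16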
| 83 | 1 |
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start``, recording even-size subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # A subtree of even size can be split off by cutting the edge to its parent.
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1) | 365 |
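# Editor's note: worked example for the even-tree code above. Rooted at node 1, the
# subtree sizes are: node 3 -> 2, node 6 -> 4, node 2 -> 3, node 8 -> 3, whole tree
# -> 10. The proper subtrees of even size are those at nodes 3 and 6, so two edges
# can be cut; dfs(1) also records the root itself (size 10), hence the final
# `len(cuts) - 1` prints 2.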
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase : Any = logging.get_logger(__name__)
lowercase : Any = {'vocab_file': 'spiece.model'}
lowercase : int = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :int , a :List[Any] , a :Optional[Any]=False , a :List[str]=True , a :str=False , a :Optional[Any]="<s>" , a :Tuple="</s>" , a :int="<unk>" , a :Optional[Any]="<sep>" , a :List[str]="<pad>" , a :Any="<cls>" , a :List[Any]="<mask>" , a :Optional[Any]=["<eop>", "<eod>"] , a :Optional[Dict[str, Any]] = None , **a :List[str] , ) -> None:
__UpperCamelCase : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
__UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
__UpperCamelCase : int = 3
__UpperCamelCase : Union[str, Any] = do_lower_case
__UpperCamelCase : str = remove_space
__UpperCamelCase : int = keep_accents
__UpperCamelCase : Optional[int] = vocab_file
__UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
__UpperCamelCase : Optional[Any] = jieba
__UpperCamelCase : Optional[int] = str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCamelCase ( self :Optional[int] ) -> List[str]:
return len(self.sp_model )
def _lowerCamelCase ( self :Dict ) -> str:
__UpperCamelCase : Optional[int] = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :Optional[int] ) -> int:
__UpperCamelCase : Tuple = self.__dict__.copy()
__UpperCamelCase : Optional[Any] = None
return state
def __setstate__( self :Optional[int] , a :Dict ) -> str:
__UpperCamelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCamelCase : Union[str, Any] = {}
__UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self :List[Any] , a :str ) -> int:
if self.remove_space:
__UpperCamelCase : int = " ".join(inputs.strip().split() )
else:
__UpperCamelCase : Union[str, Any] = inputs
__UpperCamelCase : List[str] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
__UpperCamelCase : Tuple = unicodedata.normalize("NFKD" , a )
__UpperCamelCase : Optional[Any] = "".join([c for c in outputs if not unicodedata.combining(a )] )
if self.do_lower_case:
__UpperCamelCase : Any = outputs.lower()
return outputs
def _lowerCamelCase ( self :Tuple , a :str ) -> List[str]:
__UpperCamelCase : List[Any] = self.preprocess_text(a )
__UpperCamelCase : int = self.sp_model.encode(a , out_type=a )
__UpperCamelCase : Optional[Any] = []
for piece in pieces:
if len(a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__UpperCamelCase : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__UpperCamelCase : List[str] = cur_pieces[1:]
else:
__UpperCamelCase : int = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(a )
else:
new_pieces.append(a )
return new_pieces
def _lowerCamelCase ( self :str , a :Dict ) -> List[str]:
return self.sp_model.PieceToId(a )
def _lowerCamelCase ( self :Tuple , a :int ) -> Tuple:
return self.sp_model.IdToPiece(a )
def _lowerCamelCase ( self :Union[str, Any] , a :Union[str, Any] ) -> List[Any]:
__UpperCamelCase : str = "".join(a ).replace(a , " " ).strip()
return out_string
def _lowerCamelCase ( self :Any , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : Tuple = [self.sep_token_id]
__UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self :Any , a :List[int] , a :Optional[List[int]] = None , a :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is not None:
return ([0] * len(a )) + [1] + ([0] * len(a )) + [1, 1]
return ([0] * len(a )) + [1, 1]
def _lowerCamelCase ( self :Dict , a :List[int] , a :Optional[List[int]] = None ) -> List[int]:
__UpperCamelCase : Optional[int] = [self.sep_token_id]
__UpperCamelCase : Dict = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCamelCase ( self :Union[str, Any] , a :str , a :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase : Tuple = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , "wb" ) as fi:
__UpperCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def _lowerCamelCase ( self :str , *a :str , **a :Any ) -> Tuple:
__UpperCamelCase : int = super()._decode(*a , **a )
__UpperCamelCase : int = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text | 151 | 0 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt"}
__A = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
__A = {
"openbmb/cpm-ant-10b": 1_0_2_4,
}
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]:
lowercase__: str = collections.OrderedDict()
with open(lowerCAmelCase__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase__: Optional[Any] = reader.readlines()
for index, token in enumerate(lowerCAmelCase__ ):
lowercase__: str = token.rstrip('''\n''' )
lowercase__: Optional[int] = index
return vocab
class UpperCAmelCase (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<unk>" , _UpperCAmelCase=200 ):
lowercase__: Union[str, Any] = vocab
lowercase__: Optional[Any] = unk_token
lowercase__: Optional[Any] = max_input_chars_per_word
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = list(_UpperCAmelCase )
if len(_UpperCAmelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase__: Tuple = 0
lowercase__: str = []
while start < len(_UpperCAmelCase ):
lowercase__: Optional[Any] = len(_UpperCAmelCase )
lowercase__: int = None
while start < end:
lowercase__: Optional[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase__: Dict = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_UpperCAmelCase )
lowercase__: Tuple = end
return sub_tokens
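# Editor's note: worked illustration of the greedy longest-match-first loop above,
# under the assumption of a toy vocab containing "play" and "ing". For the input
# "playing", `end` shrinks from 7 until "play" is found in the vocab; the match
# advances `start` to 4, "ing" then matches, and the result is ["play", "ing"].
# A span with no match at any length emits `unk_token` and advances one character.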
class UpperCAmelCase (_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase :Dict = VOCAB_FILES_NAMES
_UpperCAmelCase :str = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Tuple = ["input_ids", "attention_mask"]
_UpperCAmelCase :int = False
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<d>" , _UpperCAmelCase="</d>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="</n>" , _UpperCAmelCase="</_>" , _UpperCAmelCase="left" , **_UpperCAmelCase , ):
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_UpperCAmelCase , eod_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , line_token=_UpperCAmelCase , space_token=_UpperCAmelCase , padding_side=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__: Any = bod_token
lowercase__: Dict = eod_token
lowercase__: List[str] = load_vocab(_UpperCAmelCase )
lowercase__: str = self.encoder[space_token]
lowercase__: Optional[Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase__: Tuple = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1] ) )
lowercase__: List[Any] = {v: k for k, v in self.encoder.items()}
lowercase__: int = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _snake_case ( self ):
return self.encoder[self.bod_token]
@property
def _snake_case ( self ):
return self.encoder[self.eod_token]
@property
def _snake_case ( self ):
return self.encoder["\n"]
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Union[str, Any] = []
for x in jieba.cut(_UpperCAmelCase , cut_all=_UpperCAmelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_UpperCAmelCase ) )
return output_tokens
def _snake_case ( self , _UpperCAmelCase , **_UpperCAmelCase ):
lowercase__: Union[str, Any] = [i for i in token_ids if i >= 0]
lowercase__: int = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
return token in self.encoder
def _snake_case ( self , _UpperCAmelCase ):
return "".join(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if os.path.isdir(_UpperCAmelCase ):
lowercase__: Optional[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase__: str = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase__: List[str] = 0
if " " in self.encoder:
lowercase__: Optional[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase__: str = self.encoder['''\n''']
del self.encoder["\n"]
lowercase__: List[str] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1] ) )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase__: Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase ))
return [1] + ([0] * len(_UpperCAmelCase ))
| 177 |
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Move ``height`` disks from ``from_pole`` to ``to_pole``, using ``with_pole`` as the spare."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    print('moving disk from', from_pole, 'to', to_pole)


def main() -> None:
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')


if __name__ == "__main__":
    main()
| 168 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
'''simple docstring'''
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
    """simple docstring"""
    # NOTE: the tester pins these values regardless of the arguments, as in the original test file.
    self.parent = parent
    self.batch_size = 13
    self.seq_length = 7
    self.is_training = True
    self.use_input_mask = True
    self.use_token_type_ids = True
    self.use_labels = True
    self.vocab_size = 99
    self.hidden_size = 384
    self.num_hidden_layers = 2
    self.num_attention_heads = 4
    self.intermediate_size = 37
    self.hidden_act = "gelu"
    self.hidden_dropout_prob = 0.1
    self.attention_probs_dropout_prob = 0.1
    self.max_position_embeddings = 512
    self.type_vocab_size = 16
    self.type_sequence_label_size = 2
    self.initializer_range = 0.02
    self.num_labels = 3
    self.num_choices = 4
    self.embedding_size = 128
    self.head_ratio = 2
    self.conv_kernel_size = 9
    self.num_groups = 1
    self.scope = None
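# prepare_config_and_inputs builds random input tensors plus a small ConvBertConfig for a single test case.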
def prepare_config_and_inputs(self):
    """simple docstring"""
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
    config = ConvBertConfig(
        vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = TFConvBertModel(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    inputs = [input_ids, input_mask]
    result = model(inputs)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = TFConvBertForMaskedLM(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    config.num_labels = self.num_labels
    model = TFConvBertForSequenceClassification(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    config.num_choices = self.num_choices
    model = TFConvBertForMultipleChoice(config=config)
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
    inputs = {
        "input_ids": multiple_choice_inputs_ids,
        "attention_mask": multiple_choice_input_mask,
        "token_type_ids": multiple_choice_token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    config.num_labels = self.num_labels
    model = TFConvBertForTokenClassification(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """simple docstring"""
    model = TFConvBertForQuestionAnswering(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
    """simple docstring"""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_pruning = False
test_headmasking = False
test_onnx = False
def setUp(self):
    """simple docstring"""
    self.model_tester = TFConvBertModelTester(self)
    self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
def test_config(self):
    """simple docstring"""
    self.config_tester.run_common_tests()
def test_model(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_question_answering(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
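# The extended saved-model test exports each model to a TF SavedModel, reloads it with tf.keras, and checks
# that hidden states and attentions survive the round trip. ConvBERT uses head_ratio=2, which is why the
# attention shape checks expect num_attention_heads / 2 heads.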
@slow
def test_saved_model_creation_extended(self):
    """simple docstring"""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.output_hidden_states = True
    config.output_attentions = True
    if hasattr(config, "use_cache"):
        config.use_cache = True
    encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
    encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
    for model_class in self.all_model_classes:
        class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
        model = model_class(config)
        num_out = len(model(class_inputs_dict))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, saved_model=True)
            saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
            model = tf.keras.models.load_model(saved_model_dir)
            outputs = model(class_inputs_dict)
            if self.is_encoder_decoder:
                output_hidden_states = outputs["encoder_hidden_states"]
                output_attentions = outputs["encoder_attentions"]
            else:
                output_hidden_states = outputs["hidden_states"]
                output_attentions = outputs["attentions"]
            self.assertEqual(len(outputs), num_out)
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(output_hidden_states), expected_num_layers)
            self.assertListEqual(
                list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )
            self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
@slow
def test_model_from_pretrained(self):
    """simple docstring"""
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    self.assertIsNotNone(model)
def test_attention_outputs(self):
    """simple docstring"""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.return_dict = True
    decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
    encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
    decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
    encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
    def check_decoder_attentions_output(outputs):
        out_len = len(outputs)
        self.assertEqual(out_len % 2, 0)
        decoder_attentions = outputs.decoder_attentions
        self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )
    def check_encoder_attentions_output(outputs):
        attentions = [
            t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
        ]
        self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
    for model_class in self.all_model_classes:
        inputs_dict["output_attentions"] = True
        config.output_hidden_states = False
        model = model_class(config)
        outputs = model(self._prepare_for_class(inputs_dict, model_class))
        out_len = len(outputs)
        self.assertEqual(config.output_hidden_states, False)
        check_encoder_attentions_output(outputs)
        if self.is_encoder_decoder:
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_decoder_attentions_output(outputs)
        # Check that output attentions can also be changed via the config
        del inputs_dict["output_attentions"]
        config.output_attentions = True
        model = model_class(config)
        outputs = model(self._prepare_for_class(inputs_dict, model_class))
        self.assertEqual(config.output_hidden_states, False)
        check_encoder_attentions_output(outputs)
        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        model = model_class(config)
        outputs = model(self._prepare_for_class(inputs_dict, model_class))
        self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
        self.assertEqual(model.config.output_hidden_states, True)
        check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
@slow
def test_inference_no_head(self):
    """simple docstring"""
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    output = model(input_ids)[0]
    expected_shape = [1, 6, 768]
    self.assertEqual(output.shape, expected_shape)
    expected_slice = tf.constant(
        [
            [
                [-0.03475493, -0.4686034, -0.30638832],
                [0.22637248, -0.26988646, -0.7423424],
                [0.10324868, -0.45013508, -0.58280784],
            ]
        ])
    tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 301 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    '''simple docstring'''
def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
    """simple docstring"""
    self.parent = parent
    self.batch_size = batch_size
    self.image_size = image_size
    self.num_channels = num_channels
    self.embeddings_size = embeddings_size
    self.hidden_sizes = hidden_sizes
    self.depths = depths
    self.is_training = is_training
    self.use_labels = use_labels
    self.hidden_act = hidden_act
    self.num_labels = num_labels
    self.scope = scope
    self.num_stages = len(hidden_sizes)
def prepare_config_and_inputs(self):
    """simple docstring"""
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
    config = self.get_config()
    return config, pixel_values
def get_config(self):
    """simple docstring"""
    return RegNetConfig(
        num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
def create_and_check_model(self, config, pixel_values):
    """simple docstring"""
    model = FlaxRegNetModel(config=config)
    result = model(pixel_values)
    # Output shape (b, c, h, w)
    self.parent.assertEqual(
        result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def create_and_check_for_image_classification(self, config, pixel_values):
    """simple docstring"""
    config.num_labels = self.num_labels
    model = FlaxRegNetForImageClassification(config=config)
    result = model(pixel_values)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
    """simple docstring"""
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
is_encoder_decoder = False
test_head_masking = False
has_attentions = False
def setUp(self) -> None:
    """simple docstring"""
    self.model_tester = FlaxRegNetModelTester(self)
    self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def test_config(self):
    """simple docstring"""
    self.create_and_test_config_common_properties()
    self.config_tester.create_and_test_config_to_json_string()
    self.config_tester.create_and_test_config_to_json_file()
    self.config_tester.create_and_test_config_from_and_save_pretrained()
    self.config_tester.create_and_test_config_with_num_labels()
    self.config_tester.check_config_can_be_init_without_params()
    self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self):
    """simple docstring"""
    return
def test_model(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_image_classification(self):
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="RegNet does not use inputs_embeds")
def test_inputs_embeds(self):
    """simple docstring"""
    pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def test_model_common_attributes(self):
    """simple docstring"""
    pass
def test_forward_signature(self):
    """simple docstring"""
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config)
        signature = inspect.signature(model.__call__)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["pixel_values"]
        self.assertListEqual(arg_names[:1], expected_arg_names)
def test_hidden_states_output(self):
    """simple docstring"""
    def check_hidden_states_output(inputs_dict, config, model_class):
        model = model_class(config)
        outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
        expected_num_stages = self.model_tester.num_stages
        self.assertEqual(len(hidden_states), expected_num_stages + 1)
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        inputs_dict["output_hidden_states"] = True
        check_hidden_states_output(inputs_dict, config, model_class)
        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        check_hidden_states_output(inputs_dict, config, model_class)
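# The JIT test compiles the forward pass with jax.jit and checks output shapes match the eager (disable_jit) call.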
def test_jit_compilation(self):
    """simple docstring"""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        with self.subTest(model_class.__name__):
            prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            @jax.jit
            def model_jitted(pixel_values, **kwargs):
                return model(pixel_values=pixel_values, **kwargs)
            with self.subTest("JIT Enabled"):
                jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
            with self.subTest("JIT Disabled"):
                with jax.disable_jit():
                    outputs = model_jitted(**prepared_inputs_dict).to_tuple()
            self.assertEqual(len(outputs), len(jitted_outputs))
            for jitted_output, output in zip(jitted_outputs, outputs):
                self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
@cached_property
def default_image_processor(self):
    """simple docstring"""
    return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
    """simple docstring"""
    model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="np")
    outputs = model(**inputs)
    # verify the logits
    expected_shape = (1, 1000)
    self.assertEqual(outputs.logits.shape, expected_shape)
    expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
    self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 301 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
def get_dummy_inputs(self, seed=0):
    image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
    generator = torch.manual_seed(seed)
    inputs = {
        "prompt": "A painting of a squirrel eating a burger",
        "image": image,
        "generator": generator,
        "num_inference_steps": 3,
        "guidance_scale": 7.5,
        "output_type": "numpy",
    }
    return inputs
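# Each test below swaps in a different scheduler and compares a 3x3 slice of the upscaled image to a reference.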
def test_pipeline_default(self):
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs()
    image = pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1].flatten()
    # started as 128, should now be 512
    assert image.shape == (1, 512, 512, 3)
    expected_slice = np.array(
        [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223])
    assert np.abs(image_slice - expected_slice).max() < 1e-1
def test_pipeline_pndm(self):
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
    pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs()
    image = pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 512, 512, 3)
    expected_slice = np.array(
        [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def test_pipeline_dpm_multistep(self):
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs()
    image = pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 512, 512, 3)
    expected_slice = np.array(
        [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def test_pipeline_euler(self):
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
    pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs()
    image = pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 512, 512, 3)
    expected_slice = np.array(
        [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def test_pipeline_euler_ancestral(self):
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs()
    image = pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 512, 512, 3)
    expected_slice = np.array(
        [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
@property
def gpu_provider(self):
    return (
        "CUDAExecutionProvider",
        {
            "gpu_mem_limit": "15000000000",  # 15GB
            "arena_extend_strategy": "kSameAsRequested",
        },
    )
@property
def gpu_options(self):
    options = ort.SessionOptions()
    options.enable_mem_pattern = False
    return options
def test_inference_default_pndm(self):
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg")
    init_image = init_image.resize((128, 128))
    # using the PNDM scheduler by default
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
        "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
    pipe.set_progress_bar_config(disable=None)
    prompt = "A fantasy landscape, trending on artstation"
    generator = torch.manual_seed(0)
    output = pipe(
        prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
    images = output.images
    image_slice = images[0, 255:258, 383:386, -1]
    assert images.shape == (1, 512, 512, 3)
    expected_slice = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972])
    # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
    assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def test_inference_k_lms(self):
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg")
    init_image = init_image.resize((128, 128))
    lms_scheduler = LMSDiscreteScheduler.from_pretrained(
        "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
    pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
        "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, )
    pipe.set_progress_bar_config(disable=None)
    prompt = "A fantasy landscape, trending on artstation"
    generator = torch.manual_seed(0)
    output = pipe(
        prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
    images = output.images
    image_slice = images[0, 255:258, 383:386, -1]
    assert images.shape == (1, 512, 512, 3)
    expected_slice = np.array(
        [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566])
    # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
    assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 214 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
    self.parent = parent
    self.batch_size = batch_size
    self.seq_length = seq_length
    self.is_training = is_training
    self.use_input_mask = use_input_mask
    self.use_token_type_ids = use_token_type_ids
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    self.num_labels = num_labels
    self.num_choices = num_choices
    self.scope = scope
def prepare_config_and_inputs(self):
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = None
    if self.use_input_mask:
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
    config = self.get_config()
    return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
    config = EsmConfig(
        vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False}, )
    return config
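# The folding model predicts 3D structure: per residue it returns 14 atom coordinates (x, y, z) and
# 7 torsion angles (sin, cos), one set per output block (8 here, matching the shape assertions below).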
def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
    model = EsmForProteinFolding(config=config).float()
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask)
    result = model(input_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
    self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
def prepare_config_and_inputs_for_common(self):
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
test_mismatched_shapes = False
all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
all_generative_model_classes = ()
pipeline_model_mapping = {} if is_torch_available() else {}
test_sequence_classification_problem_types = False
def setUp(self):
    self.model_tester = EsmFoldModelTester(self)
    self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def test_config(self):
    self.config_tester.run_common_tests()
def test_model(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("Does not support attention outputs")
def test_attention_outputs(self):
    pass
@unittest.skip
def test_correct_missing_keys(self):
    pass
@unittest.skip("Esm does not support embedding resizing")
def test_resize_embeddings_untied(self):
    pass
@unittest.skip("Esm does not support embedding resizing")
def test_resize_tokens_embeddings(self):
    pass
@unittest.skip("ESMFold does not support passing input embeds!")
def test_inputs_embeds(self):
    pass
@unittest.skip("ESMFold does not support head pruning.")
def test_head_pruning(self):
    pass
@unittest.skip("ESMFold does not support head pruning.")
def test_head_pruning_integration(self):
    pass
@unittest.skip("ESMFold does not support head pruning.")
def test_head_pruning_save_load_from_config_init(self):
    pass
@unittest.skip("ESMFold does not support head pruning.")
def test_head_pruning_save_load_from_pretrained(self):
    pass
@unittest.skip("ESMFold does not support head pruning.")
def test_headmasking(self):
    pass
@unittest.skip("ESMFold does not output hidden states in the normal way.")
def test_hidden_states_output(self):
    pass
@unittest.skip("ESMfold does not output hidden states in the normal way.")
def test_retain_grad_hidden_states_attentions(self):
    pass
@unittest.skip("ESMFold only has one output format.")
def test_model_outputs_equivalence(self):
    pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
def test_save_load_fast_init_from_base(self):
    pass
@unittest.skip("ESMFold does not support input chunking.")
def test_feed_forward_chunking(self):
    pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
def test_initialization(self):
    pass
@unittest.skip("ESMFold doesn't support torchscript compilation.")
def test_torchscript_output_attentions(self):
    pass
@unittest.skip("ESMFold doesn't support torchscript compilation.")
def test_torchscript_output_hidden_state(self):
    pass
@unittest.skip("ESMFold doesn't support torchscript compilation.")
def test_torchscript_simple(self):
    pass
@unittest.skip("ESMFold doesn't support data parallel.")
def test_multi_gpu_data_parallel_forward(self):
    pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def test_model_is_small(self):
    pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
@slow
def test_inference_protein_folding(self):
    model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
    model.eval()
    input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
    position_outputs = model(input_ids)["positions"]
    expected_slice = torch.tensor([2.5_828, 0.7_993, -10.9_334], dtype=torch.float32)
    self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 214 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if self.framework == "tf":
        raise ValueError(f"The {self.__class__} is only available in PyTorch.")
    requires_backends(self, "vision")
    self.check_model_type(
        dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
def _sanitize_parameters(self, **kwargs):
    postprocess_kwargs = {}
    if "threshold" in kwargs:
        postprocess_kwargs["threshold"] = kwargs["threshold"]
    return {}, {}, postprocess_kwargs
def __call__(self, *args, **kwargs):
    return super().__call__(*args, **kwargs)
def preprocess(self, image):
    image = load_image(image)
    target_size = torch.IntTensor([[image.height, image.width]])
    inputs = self.image_processor(images=[image], return_tensors="pt")
    if self.tokenizer is not None:
        inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
    inputs["target_size"] = target_size
    return inputs
def _forward(self, model_inputs):
    target_size = model_inputs.pop("target_size")
    outputs = self.model(**model_inputs)
    model_outputs = outputs.__class__({"target_size": target_size, **outputs})
    if self.tokenizer is not None:
        model_outputs["bbox"] = model_inputs["bbox"]
    return model_outputs
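# postprocess has two paths: LayoutLM-style token classifiers (OCR boxes normalized to a 0-1000 grid,
# rescaled to pixels via `unnormalize`) and regular object-detection heads (boxes from the image processor).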
def postprocess(self, model_outputs, threshold=0.9):
    target_size = model_outputs["target_size"]
    if self.tokenizer is not None:
        # This is a LayoutLMForTokenClassification variant.
        # The OCR got the boxes and the model classified the words.
        height, width = target_size[0].tolist()
        def unnormalize(bbox):
            return self._get_bounding_box(
                torch.Tensor(
                    [
                        (width * bbox[0] / 1000),
                        (height * bbox[1] / 1000),
                        (width * bbox[2] / 1000),
                        (height * bbox[3] / 1000),
                    ]))
        scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
        labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
        boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
        keys = ["score", "label", "box"]
        annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
    else:
        # This is a regular ForObjectDetectionModel
        raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
        raw_annotation = raw_annotations[0]
        scores = raw_annotation["scores"]
        labels = raw_annotation["labels"]
        boxes = raw_annotation["boxes"]
        raw_annotation["scores"] = scores.tolist()
        raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
        raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
        # {"scores": [...], ...} --> [{"score":x, ...}, ...]
        keys = ["score", "label", "box"]
        annotation = [
            dict(zip(keys, vals))
            for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
        ]
    return annotation
def _get_bounding_box(self, box):
    if self.framework != "pt":
        raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
    xmin, ymin, xmax, ymax = box.int().tolist()
    bbox = {
        "xmin": xmin,
        "ymin": ymin,
        "xmax": xmax,
        "ymax": ymax,
    }
    return bbox
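# Minimal usage sketch (hypothetical example, not part of this file): the pipeline is normally built through
# the `pipeline` factory with an object-detection checkpoint such as "facebook/detr-resnet-50":
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]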
| 80 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
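# Typical lazy-import module init: submodules are only materialized when type checking or on first attribute access.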
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
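# Each parametrized case maps a compression format to a fixture file and its dedicated extractor class.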
@pytest.mark.parametrize(
'''compression_format, is_archive''' ,[
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] ,)
def test_base_extractors(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    '''simple docstring'''
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
lowerCamelCase__ = file_path.read_text(encoding='''utf-8''' )
else:
lowerCamelCase__ = output_path.read_text(encoding='''utf-8''' )
lowerCamelCase__ = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' ,[
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] ,)
def test_extractor(
    compression_format, is_archive, bz2_file, gz_file, lz4_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    '''simple docstring'''
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    '''simple docstring'''
    import tarfile
    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    '''simple docstring'''
    import tarfile
    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' ,[('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] ,)
def test_tar_extract_insecure_files(insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog):
    '''simple docstring'''
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    '''simple docstring'''
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
        b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
        b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
        b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 209 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    '''simple docstring'''
def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ):
    '''simple docstring'''
    self.parent = parent
    self.batch_size = batch_size
    self.encoder_seq_length = encoder_seq_length
    self.decoder_seq_length = decoder_seq_length
    # For common tests
    self.seq_length = self.decoder_seq_length
    self.is_training = is_training
    self.use_attention_mask = use_attention_mask
    self.use_labels = use_labels
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.d_ff = d_ff
    self.relative_attention_num_buckets = relative_attention_num_buckets
    self.dropout_rate = dropout_rate
    self.initializer_factor = initializer_factor
    self.eos_token_id = eos_token_id
    self.pad_token_id = pad_token_id
    self.decoder_start_token_id = decoder_start_token_id
    self.scope = None
    self.decoder_layers = decoder_layers
'''simple docstring'''
return TaConfig.from_pretrained('''google/umt5-base''' )
def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(
            config.num_decoder_layers, config.num_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
def prepare_config_and_inputs(self):
    '''simple docstring'''
    input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
    decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
    # we need to clamp the input ids here to avoid having pad token in between
    # this is because for NllbMoe the position_ids are prepared such that
    # all pad tokens have pos id = 2 and rest are between 2..seq_length
    # and the seq_length here is seq_length - num_pad_tokens
    # but when using past, there is no way of knowing if the past input ids had
    # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
    # position_ids being off by num_pad_tokens in past input
    input_ids = input_ids.clamp(self.pad_token_id + 1)
    decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
    config = self.get_config()
    config.encoder_attention_heads = config.num_attention_heads
    input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
    return config, input_dict
def prepare_config_and_inputs_for_common(self):
    '''simple docstring'''
    config, inputs_dict = self.prepare_config_and_inputs()
    return config, inputs_dict
def get_pipeline_config(self):
    '''simple docstring'''
    return T5Config(
        vocab_size=1_6_6, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def get_config(self):
    '''simple docstring'''
    return T5Config(
        vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
    '''simple docstring'''
    model = UMT5Model(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
    result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
    decoder_output = result.last_hidden_state
    decoder_past = result.past_key_values
    encoder_output = result.encoder_last_hidden_state
    self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
    self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
    # There should be `num_layers` key value embeddings stored in decoder_past
    self.parent.assertEqual(len(decoder_past), config.num_layers)
    # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
    self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels,
    ):
        '''simple docstring'''
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append the new token to the previous input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select a random slice of the hidden dimension
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that the outputs are equal for the slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
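        # The check above works because with `past_key_values` the decoder processes
        # only the single new token, while the no-cache call re-processes the full
        # concatenated sequence; position -1 of the latter must match position 0 of
        # the former within the 1e-3 tolerance.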
    def create_and_check_model_fpaa_forward(self, config, input_dict):
        '''simple docstring'''
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    # NOTE: the five flag names below are assumed from the usual ModelTesterMixin
    # attributes; the obfuscated source does not preserve them.
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        '''simple docstring'''
        self.model_tester = UMTaModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fpaa_forward(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*config_and_inputs)
    def test_with_head_masking(self):
        '''simple docstring'''
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
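            # A zeroed (num_layers, num_heads) mask disables every head of that
            # module, which is why the summed attention weights above must be 0.0.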
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        '''simple docstring'''
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        '''simple docstring'''
        model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ])
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 209 | 1 |
'''simple docstring'''
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def parameterized_custom_name_func(func, param_num, param):
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
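# e.g. for param.args == ("zero2", "base") the generated sub-test name would be
# something like "test_fp16_distributed_zero2_base" (illustrative).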
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    '''simple docstring'''
    # NOTE: the concrete distributed/fp16 combinations below are reconstructed from
    # the require_torch_multi_gpu markers; the obfuscated source hides the values.
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check(self, stage, model, eval_steps=10, distributed=True, quality_checks=True, fp16=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
""".split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir
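    # A sketch of the fragment assumed inside ds_config_wav2vec2_{zero2,zero3}.json
    # (illustrative, not the actual file contents):
    #   {"zero_optimization": {"stage": 2, "find_unused_parameters": true, ...}, ...}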
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
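    # e.g. get_launcher(distributed=True) on a 2-GPU box returns
    # ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"] (illustrative).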
| 351 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
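        # BPE trace for "lower" with the toy merges above (illustrative):
        #   "l o w e r</w>" -> "lo w e r</w>" (merge "l o")
        #   -> "lo w er</w>" (merge "e r</w>"), i.e. ["lo", "w", "er</w>"]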
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
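                # e.g. for text_of_1_token = "hello" the two encodings above yield
                # offsets [(0, 5), (6, 11)] and [(1, 6), (7, 12)] respectively
                # (illustrative).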
    def test_loading_old_tokenizer_raises(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 228 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
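        # WordPiece trace (illustrative): "unwanted" -> "un", "##want", "##ed",
        # since "un" is in the vocab and the remainder matches "##"-prefixed pieces.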
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 321 |
'''simple docstring'''
def sylvester(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
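    # A minimal iterative sketch of the same recurrence, s(n+1) = s(n)**2 - s(n) + 1,
    # useful for cross-checking the recursive version above (illustrative only):
    term = 2
    for n in range(1, 9):
        print(f"s({n}) = {term}")
        term = term * term - term + 1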
| 79 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        """simple docstring"""
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
def __repr__( self ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        """simple docstring"""
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(self.collection) - 1)
    def update(self, i, val):
        """simple docstring"""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """simple docstring"""
        return self._query_range(self.root, i, j)
    def _build_tree(self, start, end):
        """simple docstring"""
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)
    def _update_tree(self, node, i, val):
        """simple docstring"""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)
    def _query_range(self, node, i, j):
        """simple docstring"""
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid), self._query_range(node.right, node.mid + 1, j), )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)
    def traverse(self):
        """simple docstring"""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
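# Complexity sketch (informal): _build_tree is O(n) over n leaves, while update()
# and query_range() touch O(log n) nodes each; any associative two-argument
# function (add, max, min, ...) can be plugged in, as the demo below shows.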
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
    arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 368 |
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class DebugLauncherTester(unittest.TestCase):
    # NOTE: the class and test names here are placeholders; the obfuscated
    # source does not preserve the original identifiers.
    def test_launch_test_script(self):
        """simple docstring"""
        debug_launcher(test_script.main)

    def test_launch_test_ops(self):
        """simple docstring"""
        debug_launcher(test_ops.main)
| 309 | 0 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
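# Locking the script's own file with fcntl.flock serializes prints across ranks,
# so output lines from concurrent processes don't interleave; LOCK_EX blocks
# until the lock is released in the finally-block above.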
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(F'{gpu} is broken')
raise
| 309 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
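# e.g. rename_key(state_dict, "ln_vision.weight", "vision_model.post_layernorm.weight")
# moves a tensor under the new key while deleting the old one (illustrative usage).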
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set the qkv bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device)
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 309 | 1 |
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
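# For the undirected G(n, p) model above the expected edge count is
# p * n * (n - 1) / 2, e.g. random_graph(10, 0.5) yields 22.5 edges on
# average (illustrative arithmetic, not checked anywhere).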
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 362 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__A : int = max(len(tokenizer.encode(_UpperCAmelCase)) for a in ARTICLES)
__A : str = max(len(tokenizer.encode(_UpperCAmelCase)) for a in SUMMARIES)
__A : Dict = 4
__A : Optional[Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__A ,__A : Any = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
__A : List[str] = SeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=_UpperCAmelCase , max_target_length=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , )
__A : Any = DataLoader(_UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__A : Optional[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__A : Tuple = max(len(tokenizer.encode(_UpperCAmelCase)) for a in ARTICLES)
__A : Any = max(len(tokenizer.encode(_UpperCAmelCase)) for a in SUMMARIES)
__A : Optional[int] = 4
__A : Any = LegacySeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=20 , max_target_length=_UpperCAmelCase , )
__A : Union[str, Any] = DataLoader(_UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')
__A : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
__A : List[str] = tmp_dir.joinpath('train.source').open().readlines()
__A : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(_UpperCAmelCase , _UpperCAmelCase , 128 , _UpperCAmelCase)
__A : Dict = {x.name for x in tmp_dir.iterdir()}
__A : Dict = {x.name for x in save_dir.iterdir()}
__A : str = save_dir.joinpath('train.source').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_UpperCAmelCase) < len(_UpperCAmelCase)
assert len(_UpperCAmelCase) == 1
assert len(packed_examples[0]) == sum(len(_UpperCAmelCase) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
__A ,__A ,__A : List[Any] = self._get_dataset(max_len=64)
__A : Union[str, Any] = 64
__A : List[Any] = ds.make_dynamic_sampler(_UpperCAmelCase , required_batch_size_multiple=_UpperCAmelCase)
__A : Union[str, Any] = [len(_UpperCAmelCase) for x in batch_sampler]
assert len(set(_UpperCAmelCase)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_UpperCAmelCase) == len(_UpperCAmelCase) # no dropped or added examples
__A : List[Any] = DataLoader(_UpperCAmelCase , batch_sampler=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2)
__A : Optional[int] = []
__A : Tuple = []
for batch in data_loader:
__A : Optional[int] = batch['input_ids'].shape
__A : Any = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__A : Tuple = np.product(batch['input_ids'].shape)
num_src_per_batch.append(_UpperCAmelCase)
if num_src_tokens > (max_tokens * 1.1):
failures.append(_UpperCAmelCase)
assert num_src_per_batch[0] == max(_UpperCAmelCase)
if failures:
raise AssertionError(F'too many tokens in {len(_UpperCAmelCase)} batches')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A ,__A : Optional[int] = self._get_dataset(max_len=512)
__A : Optional[int] = 2
__A : Dict = ds.make_sortish_sampler(_UpperCAmelCase , shuffle=_UpperCAmelCase)
__A : Tuple = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2)
__A : Union[str, Any] = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=_UpperCAmelCase)
__A : str = tokenizer.pad_token_id
def count_pad_tokens(_UpperCAmelCase , _UpperCAmelCase="input_ids"):
return [batch[k].eq(_UpperCAmelCase).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_UpperCAmelCase , k='labels')) < sum(count_pad_tokens(_UpperCAmelCase , k='labels'))
assert sum(count_pad_tokens(_UpperCAmelCase)) < sum(count_pad_tokens(_UpperCAmelCase))
assert len(_UpperCAmelCase) == len(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=1000 , _UpperCAmelCase=128):
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , _UpperCAmelCase):
__A : Dict = 'examples/seq2seq/wmt_en_ro'
__A : Any = max_len * 2 * 64
if not Path(_UpperCAmelCase).joinpath('train.len').exists():
save_len_file(_UpperCAmelCase , _UpperCAmelCase)
else:
__A : int = 'examples/seq2seq/test_data/wmt_en_ro'
__A : Any = max_len * 4
save_len_file(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = AutoTokenizer.from_pretrained(_UpperCAmelCase)
__A : Optional[int] = SeqaSeqDataset(
_UpperCAmelCase , data_dir=_UpperCAmelCase , type_path='train' , max_source_length=_UpperCAmelCase , max_target_length=_UpperCAmelCase , n_obs=_UpperCAmelCase , )
return ds, max_tokens, tokenizer
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A ,__A : Tuple = self._get_dataset()
__A : Optional[int] = set(DistributedSortishSampler(_UpperCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=_UpperCAmelCase))
__A : List[str] = set(DistributedSortishSampler(_UpperCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=_UpperCAmelCase))
assert idsa.intersection(_UpperCAmelCase) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8, src_lang='EN', tgt_lang='FR', )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8, )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 190 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    '''simple docstring'''
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
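        # e.g. with the defaults above (image_size=64, downsampling_rates[-1]=16),
        # the last hidden state is 64 // (16 * 2) = 2 pixels per side (illustrative).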
    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)
    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # NOTE: the four flag names below are assumed from the usual ModelTesterMixin
    # attributes; the obfuscated source does not preserve them.
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = SegformerModelTester(self)
lowerCAmelCase = SegformerConfigTester(self , config_class=_A)
def a_ ( self):
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_A)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_A)
@unittest.skip("""SegFormer does not use inputs_embeds""")
def a_ ( self):
"""simple docstring"""
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""")
def a_ ( self):
"""simple docstring"""
pass
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_A)
lowerCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
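

# --- Illustrative sketch (added for clarity; not part of the original test suite) ---
# Shows, outside the test harness, the shape contract the tests above assert: a
# randomly initialized semantic-segmentation head emits logits at 1/4 of the input
# resolution. Values mirror the tester defaults and rely on the imports at the top
# of this file.
def _example_tiny_segformer_shapes():
    config = SegformerConfig(
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        num_attention_heads=[1, 2, 4, 8],
        num_labels=3,
    )
    model = SegformerForSemanticSegmentation(config).eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 64, 64)).logits
    assert logits.shape == (1, 3, 16, 16)  # (batch, num_labels, H // 4, W // 4)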
| 272 |
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# `dependencies` is the list torch.hub checks before resolving any entrypoint below
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
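

# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# The functions above are torch.hub entrypoints, so with this hubconf.py at the
# root of a GitHub repo they can be resolved remotely. The repo id and model name
# below are examples only, not guaranteed by this file:
#
#   import torch
#   tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")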
| 303 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with prompt strings
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        # the ONNX text encoder expects int32 token ids
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward with pre-computed embeddings
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with prompt strings
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]

            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward with pre-computed embeddings
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
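

# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# The two tests above pre-compute prompt embeddings and pass them through
# `prompt_embeds` / `negative_prompt_embeds` instead of raw strings. A minimal
# helper distilled from that pattern; `pipe` is any loaded OnnxStableDiffusionPipeline.
def _encode_prompt_to_embeds(pipe, prompts):
    text_inputs = pipe.tokenizer(
        prompts,
        padding="max_length",
        max_length=pipe.tokenizer.model_max_length,
        truncation=True,
        return_tensors="np",
    )
    # the ONNX text encoder expects int32 token ids
    return pipe.text_encoder(input_ids=text_inputs["input_ids"].astype(np.int32))[0]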
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 360 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(inputs_dict["attention_mask"])[:, :-1], tf.ones_like(inputs_dict["attention_mask"])[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
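

# --- Illustrative usage sketch (added for clarity; not part of the original test file) ---
# prepare_led_inputs_dict fills in any mask that is not supplied explicitly:
# encoder positions holding pad_token_id are masked out, and the first decoder
# position is always attendable. The toy ids below are arbitrary.
def _example_led_inputs(config):
    input_ids = tf.constant([[5, 6, 7, config.pad_token_id]])
    decoder_input_ids = tf.constant([[2, 8, 9]])
    inputs = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
    # inputs["attention_mask"] is 0 exactly where input_ids == pad_token_id
    return inputs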
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_lm_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 41 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
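

# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# One denoising-style forward pass through this model class loaded from a Dance
# Diffusion checkpoint. The checkpoint id and subfolder are assumptions for the
# sketch and require network access; a locally constructed config works the same way.
def _example_unet1d_forward():
    unet = UNet1DModel.from_pretrained("harmonai/maestro-150k", subfolder="unet")
    noisy_audio = torch.randn(1, unet.config.in_channels, unet.config.sample_size)
    # returns a UNet1DOutput whose .sample matches the input shape
    return unet(noisy_audio, timestep=10).sample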
| 207 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            # TF stores linear layers transposed relative to PyTorch
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
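

# Example invocation (added for clarity; the script filename and all paths below
# are placeholders):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert-base-uncased-pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints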
| 207 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
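

# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Renaming-only example: align a dataset whose columns are ("q", "ctx", "ans")
# with the canonical SQuAD-style schema carried by this template.
def _example_column_mapping():
    template = QuestionAnsweringExtractive(question_column="q", context_column="ctx", answers_column="ans")
    assert template.column_mapping == {"q": "question", "ctx": "context", "ans": "answers"}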
| 368 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
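

# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Mirrors the intended flow of this pipeline: translate human-readable ImageNet
# class names into label ids via get_label_ids, then sample class-conditionally.
# The checkpoint id is the public DiT release and requires network access.
def _example_dit_generation():
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
    class_ids = pipe.get_label_ids(["golden retriever"])
    generator = torch.manual_seed(33)
    return pipe(class_labels=class_ids, generator=generator, num_inference_steps=25).images[0]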
| 100 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 310 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
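

# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# A character-level round trip against a tokenizer saved the way setUp() does it;
# `tmpdirname` is any directory containing the tiny vocab file written above.
def _example_mgpstr_roundtrip(tmpdirname):
    tokenizer = MgpstrTokenizer.from_pretrained(tmpdirname)
    ids = tokenizer.encode("tester", add_special_tokens=False)
    assert tokenizer.decode(ids).replace(" ", "") == "tester"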
| 310 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite requires being able to run a backward pass at inference
    # time, and there is no deterministic backward operator for padding
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    # Determinism is toggled off here as well, since attend-and-excite needs a
    # backward pass at inference time.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
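# Minimal usage sketch (added; not part of the test file — prompt and token
# indices mirror the slow test above, adjust them for your own inputs):
#
#   pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", safety_checker=None
#   ).to("cuda")
#   image = pipe(
#       prompt="a painting of an elephant with glasses",
#       token_indices=[5, 7],  # prompt tokens whose attention gets "excited"
#       guidance_scale=7.5,
#       num_inference_steps=50,
#   ).images[0]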
| 28 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
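# Example invocation (added; the script filename and paths are placeholders):
#
#   python convert_hifigan.py \
#       --checkpoint_path /path/to/hifigan_generator.ckpt \
#       --stats_path /path/to/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan
#
# stats.npy is expected to hold the spectrogram mean in row 0 and the scale in
# row 1, matching the stats[0]/stats[1] reshapes performed above.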
| 28 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 43 |
'''simple docstring'''
import os
_SCREAMING_SNAKE_CASE : int = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = 0
snake_case_ = 0
while index < len(snake_case ) - 1:
snake_case_ = SYMBOLS[numerals[index]]
snake_case_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
snake_case_ = ""
snake_case_ = num // 1_0_0_0
numerals += m_count * "M"
num %= 1_0_0_0
snake_case_ = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
snake_case_ = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCamelCase_( snake_case : str = "/p089_roman.txt" ):
'''simple docstring'''
snake_case_ = 0
with open(os.path.dirname(snake_case ) + roman_numerals_filename ) as filea:
snake_case_ = filea.readlines()
for line in lines:
snake_case_ = line.strip()
snake_case_ = parse_roman_numerals(snake_case )
snake_case_ = generate_roman_numerals(snake_case )
savings += len(snake_case ) - len(snake_case )
return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 85 | 0 |
"""simple docstring"""
import os
from math import logaa
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str = "base_exp.txt" ) -> int:
_lowerCAmelCase : float = 0
_lowerCAmelCase : str = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_lowerCamelCase ) ,_lowerCamelCase ) ) ):
_lowerCAmelCase : Tuple = list(map(_lowerCamelCase ,line.split(""",""" ) ) )
if x * logaa(_lowerCamelCase ) > largest:
_lowerCAmelCase : Optional[int] = x * logaa(_lowerCamelCase )
_lowerCAmelCase : Tuple = i + 1
return result
if __name__ == "__main__":
print(solution())
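# Why the log trick works (added): log10 is monotonic, so x * log10(a) ranks
# a**x without materialising the huge powers. For example
# 2 * log10(10) = 2.0 > 3 * log10(4) ≈ 1.81, matching 10**2 = 100 > 4**3 = 64.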
 | 363 |
"""simple docstring"""
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 126 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104 |
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n (Project Euler 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
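# Illustrative checks (added, not part of the original solution):
#   solution(13195) == 29   # 13195 = 5 * 7 * 13 * 29
#   solution(17) == 17      # a prime is its own largest prime factor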
if __name__ == "__main__":
    print(f"{solution() = }")
 | 160 | 0 |
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Finds the minimum cost of a right/down path from the top-left to the
    bottom-right corner of the grid. Costs are accumulated in `matrix` in place.
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
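# Worked example (added): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# right/down path is 1 -> 3 -> 1 -> 1 -> 1, so
# min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7. Remember the function
# mutates its argument while accumulating costs.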
| 365 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
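# Illustrative usage (added; not part of the original file): attribute_map lets
# generic code read the Transformer-XL fields under their generic aliases, and
# max_position_embeddings reports -1 because there is no hard length limit.
#
#   config = TransfoXLConfig(d_model=512)
#   config.hidden_size              # -> 512 (alias of d_model)
#   config.max_position_embeddings  # -> -1 (logs an informational message)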
| 224 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 335 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
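# Quick illustrative usage of the regression helpers (added; not part of the
# original file — sizes and coefficients are arbitrary):
if __name__ == "__main__":
    dataset = RegressionDataset(length=8, seed=42)
    loader = DataLoader(dataset, batch_size=4)
    model = RegressionModel(a=1, b=0)
    for batch in loader:
        preds = model(batch["x"])  # prints the dtypes on the first batch
        print(preds.shape)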
| 231 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single story from the HackerNews item API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
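# Example of the markdown produced (added; titles and URLs are illustrative —
# the real output depends on the live HackerNews front page):
#
#   * [Show HN: An example story](https://example.com/story)
#   * [Another front-page story](https://example.com/other)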
| 48 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
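# Example invocation (added; all paths are placeholders — vocab_size and
# num_decoder_layers must match the fairseq checkpoint being converted):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./wav2vec2-s2t2 \
#       --vocab_size 10224 \
#       --num_decoder_layers 7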
| 48 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output of the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that maps a noisy sample and a timestep to a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
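# Minimal forward-pass sketch (added; shapes are illustrative and assume the
# default block configuration above — keep the sample length divisible by 4 so
# it survives the two downsampling stages):
#
#   model = UNet1DModel(in_channels=2, out_channels=2, block_out_channels=(32, 32, 64))
#   sample = torch.randn(1, 2, 192)          # (batch, channels, length)
#   out = model(sample, timestep=10).sample  # expected: torch.Size([1, 2, 192])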
 | 291 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
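# Worked example of the acceptance rule above (added): a move that worsens the
# score by 5 is accepted with probability e^(-5/100) ≈ 0.95 at temperature 100,
# but only e^(-5/1) ≈ 0.0067 at temperature 1 — gradual cooling lets the search
# explore early and settle into a good state later.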
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 157 | 0 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    # NOTE: `root` defaulting to the current directory is an assumption made
    # while repairing this snippet; the caller below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
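# Example invocation (added; "tiny.en" is one of the _MODELS keys above, so the
# original OpenAI weights are downloaded and converted in one step):
#
#   python convert_openai_whisper_to_hf.py \
#       --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en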
| 352 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
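# Illustrative usage (added; not part of the original file): attribute_map
# exposes the GPT-2 style fields under their generic aliases.
#
#   config = GPTBigCodeConfig(n_embd=1024, n_layer=24)
#   config.hidden_size        # -> 1024 (alias of n_embd)
#   config.num_hidden_layers  # -> 24 (alias of n_layer)
#   config.multi_query        # -> True, multi-query attention by default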
| 252 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 209 |
def odd_even_transposition(arr: list) -> list:
    """
    Sorts the list in place with odd-even transposition (brick) sort:
    n passes that alternate between odd- and even-indexed adjacent pairs.
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 209 | 1 |
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 370 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""MobileViTFeatureExtractor"""]
_lowerCamelCase = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case_ = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e", )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr")
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
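# For context, the tests above mirror the usual inference pattern: pair the
# tokenizer with a seq2seq model and force the target language code as the
# first generated token. A minimal illustrative sketch (not part of the test
# file; values are examples only):
# from transformers import M2M100ForConditionalGeneration
# model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
# tokenizer.src_lang = "en"
# encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
# generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
# tokenizer.batch_decode(generated, skip_special_tokens=True)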
| 69 |
"""Validate the control letter of a Spanish national ID (DNI)."""

ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678A")
    False
    >>> is_spain_national_id("12345678-Z")
    True
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    # The control letter is the 8-digit number modulo 23, used as an index
    # into the official lookup table above.
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
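# For reference, the checksum can be inverted: given the 8-digit number, the
# expected control letter is a direct table lookup. A minimal sketch (the
# helper name is illustrative, not part of the original module):
def control_letter(number: int) -> str:
    return LOOKUP_LETTERS[number % 23]


assert control_letter(12345678) == "Z"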
| 104 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2_000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2_048, dropout_rate: float = 0.1, ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
        # Outer product of the two 1-D masks, e.g. (batch, seq_q) and (batch, seq_k)
        # -> (batch, seq_q, seq_k); unsqueeze(-3) adds a broadcastable heads axis.
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens: torch.Tensor, decoder_noise_time: torch.Tensor) -> torch.Tensor:
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float = 1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            # Turn the 0/1 mask into additive attention bias (0 keeps, -1e10 masks out).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre-self-attention layer norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout_rate: float):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # Gated GELU: one projection is GELU-activated, the other stays linear,
        # and the two are multiplied elementwise before the output projection.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-6):
        """T5-style layer norm: no mean subtraction and no bias (i.e. RMSNorm)."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Always compute the variance in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert back into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of the GELU, as used in BERT/GPT-2.
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation, x -> x * (1 + scale) + shift."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
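# A rough smoke test of the decoder above (shapes only; the sizes are
# illustrative and this sketch is not part of the original module):
# model = TaFilmDecoder(input_dims=128, targets_length=256, d_model=64, num_layers=2, num_heads=2, d_kv=32, d_ff=128)
# encoder_out = torch.randn(2, 50, 64)   # (batch, enc_seq, d_model)
# encoder_mask = torch.ones(2, 50)       # 1 = attend, 0 = masked
# tokens = torch.randn(2, 256, 128)      # continuous spectrogram inputs
# noise_time = torch.rand(2)             # in [0, 1)
# spec = model(encodings_and_masks=[(encoder_out, encoder_mask)], decoder_input_tokens=tokens, decoder_noise_time=noise_time)
# spec.shape == (2, 256, 128)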
| 369 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                # Skip positions that carry the CrossEntropyLoss ignore index (padding/subwords).
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
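# Typical invocation from the command line (paths and hyper-parameters below
# are illustrative; the flags map to the dataclass fields defined above):
# python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#     --labels ./conll2003/labels.txt --output_dir ./ner-output \
#     --max_seq_length 128 --do_train --do_eval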
| 269 | 0 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Depth-first search for a path from (0, 0) to (size-1, size-1).

    A cell value of 0 is free and 1 is blocked; prints the path if one exists.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive DFS step: True if the bottom-right cell is reachable from (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
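# A quick illustrative run (0 = free cell, 1 = wall; the 4x4 grid below is
# made up for demonstration):
# maze = [
#     [0, 1, 0, 1],
#     [0, 0, 0, 1],
#     [1, 0, 1, 1],
#     [1, 0, 0, 0],
# ]
# solve_maze(maze)  # prints the visited-cell matrix and returns True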
| 90 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as TimeSformer does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """Download a short test video from the Hub and return it as a list of frames."""
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
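# For intuition on the shapes asserted in the tests above: with image_size=10,
# patch_size=2 and num_frames=2, each frame yields (10 // 2) ** 2 = 25 patches,
# so the token sequence is 2 * 25 + 1 = 51 entries including the CLS token,
# while each per-frame attention map is (25 + 1) x (25 + 1) = 26 x 26.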
| 90 | 1 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linear warmup followed by a user-supplied decay schedule."""

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None, ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float, lambda: warmup_learning_rate, lambda: self.decay_schedule_fn(step - self.warmup_steps), name=name, )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None, ):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
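# Typical usage (the values below are illustrative): warm up for 1k steps,
# then decay linearly (power=1.0) towards zero over the remaining steps.
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000, weight_decay_rate=0.01)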
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied directly to the variables."""

    def __init__(self, learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, amsgrad: bool = False, weight_decay_rate: float = 0.0, include_in_weight_decay: Optional[List[str]] = None, exclude_from_weight_decay: Optional[List[str]] = None, name: str = "AdamWeightDecay", **kwargs, ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking, )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several steps so that larger batches can be simulated."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ])
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
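# A minimal accumulation-loop sketch (`compute_gradients` and
# `accumulation_steps` are illustrative names, not part of this module):
# accumulator = GradientAccumulator()
# for micro_batch in batches:
#     accumulator(compute_gradients(micro_batch))
#     if accumulator.step % accumulation_steps == 0:
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()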
| 159 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to feed to the processor under test."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
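# In application code the processor is used the same way, usually loaded from
# a trained checkpoint (the model name below is illustrative):
# processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)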
| 159 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each task prompt n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated sequences in the batch are complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code that starts with one of the EOF_STRINGS markers."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple candidate completions for each task and clean them up."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
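
# Illustrative note (not part of the original script): the structure returned
# above is a list of n_tasks lists of cleaned completions, e.g. code_gens[3][0]
# is the first candidate solution generated for HumanEval task 3.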
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 207 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        """Mask token; logs an error and returns None if it has not been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
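    # Illustrative behavior (not part of the original file): Blenderbot adds no
    # BOS prefix, so build_inputs_with_special_tokens([101, 102]) simply returns
    # [101, 102, self.eos_token_id].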
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids

| 189 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
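
# A minimal usage sketch (illustrative, not part of the original file; `audio`
# stands in for a raw waveform array you load yourself):
#
#   tool = SpeechToTextTool()
#   tool.setup()          # instantiates the Whisper processor and model
#   text = tool(audio)    # runs encode -> forward -> decode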
| 26 |
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
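
# Worked example (a known check, not part of the original file): for n = 10 the
# sum 1..10 is 55, its square is 3025, the sum of squares is 385, and the
# difference is 3025 - 385 = 2640.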
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8000,
'sample_size': 6_5536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8000,
'sample_size': 6_5536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8000,
'sample_size': 13_1072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
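
# Sanity note (illustrative, not part of the original script): on this schedule
# alpha**2 + sigma**2 == 1, so alpha_sigma_to_t recovers a timestep in [0, 1]
# via atan2(sigma, alpha) / (pi / 2).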
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if isinstance(string_left, str):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
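
# Illustrative shapes (not part of the original script): a fused qkv Conv1d
# weight of shape (3*C, C, 1) is sliced along dim 0 into three (C, C) Linear
# weights, with the trailing kernel axis dropped by the `[..., 0]` indexing.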
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
__UpperCamelCase : Any = parser.parse_args()
main(args)
| 182 |

import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 182 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the target-language code: prefix=[lang_code_id]."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by prepending the prefix tokens and appending eos_token_id."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
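    # Illustrative layout (not part of the original file): with a target language
    # set, token ids [5, 6] become [<lang_code_id>, 5, 6, eos_token_id].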
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 351 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
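
# Note (illustrative, not part of the original script): the fused qkv bias is
# assembled as [q_bias, zeros, v_bias] because the original attention layer has
# no trainable key bias.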
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(original_pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(original_pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        original_pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 53 | 0 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Return the shortest distance and path between source and destination on a
    binary grid (1 = walkable cell, 0 = obstacle), using Dijkstra's algorithm."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
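
# Example run (illustrative, not part of the original file):
#
#   grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])  # 1 = free cell, 0 = obstacle
#   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   # dist == 4.0 and path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]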
| 103 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        window_size = int(name_split[3][-3:])
    else:
        window_size = int(name_split[3])
    if "to" in name_split[2]:
        img_size = int(name_split[2][-2:])
    else:
        img_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
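
# Example mapping (illustrative, not part of the original script):
#   "layers.0.blocks.1.attn.proj.weight"
#   -> "swinv2.encoder.layers.0.blocks.1.attention.output.dense.weight"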
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 103 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowercase :
"""simple docstring"""
__A = XGLMConfig
__A = {}
__A = "gelu"
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 71 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 71 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num):
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
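    # e.g. prime_sieve(25) -> [2, 3, 5, 7, 11, 13, 17, 19, 23]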
| 266 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
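# Loading through the deprecated alias still works but emits the FutureWarning above, e.g.:
#   feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")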
| 266 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
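    # Hypothetical invocation (checkpoint and config paths are placeholders):
    #   python convert_lxmert_original_tf_checkpoint_to_pytorch.py --tf_checkpoint_path ./model.ckpt \
    #       --config_file ./lxmert_config.json --pytorch_dump_path ./pytorch_model.bin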
| 352 |
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 0 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
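# e.g. to_atuple(224) -> (224, 224), while to_atuple((224, 192)) is already iterable and is returned unchanged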
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]))

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_pretrained_model_and_inputs(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]))

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 281 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: bool = True, num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, num_time_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, activation_function: str = "gelu", dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache: bool = True, is_encoder_decoder: bool = True, label_length: int = 10, moving_average: int = 25, autocorrelation_factor: int = 3, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
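    # e.g. with the defaults above: _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2,
    # so feature_size = 1 * len([1, 2, 3, 4, 5, 6, 7]) + 2 = 9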
| 302 | 0 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 92 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
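# e.g. euclidean_gcd(54, 24): (54, 24) -> (24, 6) -> (6, 0), so the result is 6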
def main():
'''simple docstring'''
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 92 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
lowerCamelCase = logging.get_logger(__name__)
def shape_list(tensor):
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
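# e.g. for a tensor with static shape (None, 128) this returns [<dynamic batch-size tensor>, 128]:
# known dimensions come back as plain ints, unknown ones as dynamic tf.shape entries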
def stable_softmax(logits, axis=None, name=None):
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon
    )
    return outputs
return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
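# e.g. flatten on a tensor of shape [2, 3, 4] with start_dim=1 produces shape [2, 12]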
def invert_attention_mask(encoder_attention_mask):
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids"):
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_ad_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor, data)
| 199 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS = r"<>:/\|?*"  # constant name assumed; it is not referenced below
def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
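# e.g. camelcase_to_snakecase("SomeDatasetName") -> "some_dataset_name"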
def snakecase_to_camelcase(name):
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 199 | 1 |
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
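# Sanity check: solution(10) returns 15, the classic arrangement of 15 blue discs
# out of 21 total, where P(two blue) = (15/21) * (14/20) = 1/2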
if __name__ == "__main__":
print(F'''{solution() = }''')
| 357 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
| 118 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 203 | from manim import *
class A ( UpperCAmelCase_ ):
def lowercase_ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase__ = Rectangle(height=0.25 , width=0.25 )
UpperCAmelCase__ = [mem.copy() for i in range(6 )]
UpperCAmelCase__ = [mem.copy() for i in range(6 )]
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = Text("CPU" , font_size=2_4 )
UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase__ = [mem.copy() for i in range(4 )]
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = Text("GPU" , font_size=2_4 )
UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase__ = [mem.copy() for i in range(6 )]
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = Text("Model" , font_size=2_4 )
UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
UpperCAmelCase__ = []
UpperCAmelCase__ = []
for i, rect in enumerate(__UpperCAmelCase ):
UpperCAmelCase__ = fill.copy().set_fill(__UpperCAmelCase , opacity=0.8 )
target.move_to(__UpperCAmelCase )
model_arr.append(__UpperCAmelCase )
UpperCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__UpperCAmelCase )
self.add(*__UpperCAmelCase , *__UpperCAmelCase )
UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase__ = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
UpperCAmelCase__ = Text("Disk" , font_size=2_4 )
UpperCAmelCase__ = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase__ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__UpperCAmelCase )
UpperCAmelCase__ = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ) )
UpperCAmelCase__ = Square(0.3 )
input.set_fill(__UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __UpperCAmelCase , buff=0.5 )
self.play(Write(__UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(__UpperCAmelCase ) )
self.play(FadeOut(__UpperCAmelCase ) )
UpperCAmelCase__ = Arrow(start=__UpperCAmelCase , end=__UpperCAmelCase , color=__UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCAmelCase__ = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) )
UpperCAmelCase__ = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(__UpperCAmelCase ) , Circumscribe(model_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCAmelCase__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
UpperCAmelCase__ = AnimationGroup(
FadeOut(__UpperCAmelCase , run_time=0.5 ) , MoveToTarget(__UpperCAmelCase , run_time=0.5 ) , FadeIn(__UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCAmelCase__ = 0.7
self.play(
Circumscribe(model_arr[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__UpperCAmelCase , **__UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=__UpperCAmelCase , **__UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCAmelCase__ = a_c
UpperCAmelCase__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__UpperCAmelCase ) , FadeOut(__UpperCAmelCase , run_time=0.5 ) , )
UpperCAmelCase__ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase , run_time=3 ) , MoveToTarget(__UpperCAmelCase ) )
self.wait()
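# A minimal sketch (my addition; "MoveDemo" is a hypothetical scene name) of
# the generate_target()/MoveToTarget pattern the animation above leans on,
# assuming only the public manim API:
from manim import RIGHT, MoveToTarget, Scene, Square


class MoveDemo(Scene):
    def construct(self):
        box = Square(0.3)
        self.add(box)
        box.generate_target()  # create box.target, a movable copy of box
        box.target.shift(RIGHT * 2)  # position the copy where box should end up
        self.play(MoveToTarget(box))  # animate box onto its target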
| 65 | 0 |
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    # check the type first so a non-numeric argument raises ValueError
    # instead of a TypeError from the comparison
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    # the n-th hexagonal number has the closed form n * (2n - 1)
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
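# Usage sketch (my addition): the first five values follow directly from the
# closed form n * (2n - 1) for n = 0..4.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]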
| 371 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    # score each word as the sum of its letter positions (A=1 ... Z=26) and
    # keep only the scores that are triangular numbers
    word_values = [sum(ord(x) - 64 for x in word) for word in words]
    valid_words = [value for value in word_values if value in TRIANGULAR_NUMBERS]
    return len(valid_words)
if __name__ == "__main__":
print(solution())
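# Worked example (my addition): "SKY" scores 19 + 11 + 25 = 55, the tenth
# triangular number, so solution() would count it.
assert sum(ord(x) - 64 for x in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS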
| 37 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()  # cast every tensor to torch.float16
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
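# Expected invocation via fire (my addition; the script name and paths are
# placeholders):
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# Omitting --save_path halves the checkpoint in place.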
| 236 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)  # random init, no pretrained weights
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
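# Usage sketch (my addition; the model name and output directory are
# examples): fire maps the two positional CLI arguments onto config_name
# and save_dir.
#
#   python save_random_version.py t5-small ./t5-small-random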
| 359 |
def decimal_to_fraction(decimal):
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclidean algorithm: find gcd(numerator, denominator) so the
        # fraction can be reduced to lowest terms
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction("67") = }""")
print(F"""{decimal_to_fraction("45.0") = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction("6.25") = }""")
print(F"""{decimal_to_fraction("78td") = }""")
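# Cross-check sketch (my addition; note the "78td" call above raises
# ValueError, which is the demonstrated behavior): the standard library
# reaches the same reduced fraction for a terminating decimal.
#
#   from fractions import Fraction
#   assert decimal_to_fraction("6.25") == (25, 4)
#   assert Fraction("6.25") == Fraction(25, 4)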
| 201 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class _A :
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str]=13 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Dict=24 , __SCREAMING_SNAKE_CASE : Optional[int]=16 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : Tuple=5 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Optional[Any]=37 , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=10 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : List[Any]=2 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = patch_size
__a = max_length
__a = num_mel_bins
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
__a = frequency_stride
__a = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__a = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__a = (self.max_length - self.patch_size) // self.time_stride + 1
__a = frequency_out_dimension * time_out_dimension
__a = num_patches + 2
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = self.get_config()
return config, input_values, labels
def _lowerCamelCase ( self : Any):
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = ASTModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
        ((__a), (__a), (__a)) = config_and_inputs
__a = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Dict = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ : int = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
UpperCamelCase__ : str = False
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : List[str] = False
UpperCamelCase__ : Optional[int] = False
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = ASTModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''')
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear))
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''input_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ASTModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def __snake_case ( ):
__a = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
__a , __a = torchaudio.load(_UpperCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class _A ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''')
if is_torchaudio_available()
else None
)
@slow
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.default_feature_extractor
__a = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(__SCREAMING_SNAKE_CASE)
__a = self.default_feature_extractor
__a , __a = prepare_audio()
__a = audio.squeeze().numpy()
__a = feature_extractor(__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
# verify the logits
__a = torch.Size((1, 527))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([-0.87_60, -7.00_42, -8.66_02]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
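# Hedged end-to-end sketch (my addition; reuses the public checkpoint from
# the integration test above, and the audio path is a placeholder):
#
#   from transformers import pipeline
#
#   classifier = pipeline(
#       "audio-classification", model="MIT/ast-finetuned-audioset-10-10-0.4593"
#   )
#   predictions = classifier("sample_audio.flac")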
| 49 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
__a = bytes(_UpperCAmelCase , '''utf-8''' )
with zstd.open(_UpperCAmelCase , '''wb''' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture
def __snake_case ( _UpperCAmelCase ):
with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
__a = input_paths[compression_format]
__a = tmp_path / '''cache'''
__a = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase )
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
with open(_UpperCAmelCase ) as f:
__a = f.read()
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''custom_cache'''
__a = '''custom_extracted_dir'''
__a = tmp_path / '''custom_extracted_path'''
if default_extracted:
__a = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _UpperCAmelCase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_UpperCAmelCase ) )
__a = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__a = xz_file
__a = (
DownloadConfig(extract_compressed_file=_UpperCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase )
)
__a = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase )
assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(Path(_UpperCAmelCase ).resolve() )
assert cached_path(_UpperCAmelCase ) == text_file
# relative path
__a = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_UpperCAmelCase ) == text_file
def __snake_case ( _UpperCAmelCase ):
# absolute path
__a = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
# relative path
__a = '''./__missing_file__.txt'''
with pytest.raises(_UpperCAmelCase ):
cached_path(_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = get_from_cache(f'tmp://{tmpfs_file}' )
with open(_UpperCAmelCase ) as f:
__a = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( ):
with pytest.raises(_UpperCAmelCase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
http_get('''https://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase ):
__a = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_UpperCAmelCase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head('''s3://huggingface.co''' )
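# Hedged usage sketch (my addition; the URL and cache directory are
# placeholders): cached_path downloads on first use and returns the local
# cache path on later calls, which is the behavior the tests above exercise.
#
#   from datasets.download.download_config import DownloadConfig
#   from datasets.utils.file_utils import cached_path
#
#   local_file = cached_path(
#       "https://example.com/data.txt",
#       download_config=DownloadConfig(cache_dir="./hf_cache"),
#   )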
| 49 | 1 |
'''simple docstring'''
def solution(pence: int = 200) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    # classic coin-change dynamic programme: process one coin at a time so
    # each combination is counted once, regardless of coin order
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
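# Small worked example (my addition): 5 pence can be made 4 ways
# (5, 2+2+1, 2+1+1+1, 1+1+1+1+1); the larger coins cannot contribute.
assert solution(5) == 4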
| 362 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Tuple , a__ : Union[str, Any] , a__ : Union[str, Any]=7 , a__ : Dict=3 , a__ : Optional[Any]=18 , a__ : Optional[Any]=30 , a__ : Tuple=400 , a__ : Optional[int]=True , a__ : int=None , a__ : Union[str, Any]=True , a__ : Optional[Any]=None , a__ : str=True , a__ : List[Any]=[0.5, 0.5, 0.5] , a__ : Tuple=[0.5, 0.5, 0.5] , a__ : Union[str, Any]=False , ):
__magic_name__ = size if size is not None else {'''height''': 20, '''width''': 20}
__magic_name__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = num_channels
__magic_name__ = image_size
__magic_name__ = min_resolution
__magic_name__ = max_resolution
__magic_name__ = do_resize
__magic_name__ = size
__magic_name__ = do_center_crop
__magic_name__ = crop_size
__magic_name__ = do_normalize
__magic_name__ = image_mean
__magic_name__ = image_std
__magic_name__ = do_reduce_labels
def snake_case__ ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCamelCase ( ) -> str:
'''simple docstring'''
__magic_name__ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__magic_name__ = Image.open(dataset[0]['''file'''] )
__magic_name__ = Image.open(dataset[1]['''file'''] )
return image, map
def UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
__magic_name__ = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__magic_name__ = Image.open(ds[0]['''file'''] )
__magic_name__ = Image.open(ds[1]['''file'''] )
__magic_name__ = Image.open(ds[2]['''file'''] )
__magic_name__ = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Optional[Any] = BeitImageProcessor if is_vision_available() else None
def snake_case__ ( self : Dict ):
__magic_name__ = BeitImageProcessingTester(self )
@property
def snake_case__ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : Optional[Any] ):
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , '''do_resize''' ) )
self.assertTrue(hasattr(a__ , '''size''' ) )
self.assertTrue(hasattr(a__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a__ , '''center_crop''' ) )
self.assertTrue(hasattr(a__ , '''do_normalize''' ) )
self.assertTrue(hasattr(a__ , '''image_mean''' ) )
self.assertTrue(hasattr(a__ , '''image_std''' ) )
def snake_case__ ( self : int ):
__magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , a__ )
__magic_name__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=a__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , a__ )
def snake_case__ ( self : Optional[Any] ):
pass
def snake_case__ ( self : Dict ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Optional[int] ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : List[str] ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__magic_name__ = image_processing(a__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case__ ( self : Union[str, Any] ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
__magic_name__ = []
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__magic_name__ = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
__magic_name__ , __magic_name__ = prepare_semantic_single_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
__magic_name__ , __magic_name__ = prepare_semantic_batch_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def snake_case__ ( self : Any ):
# Initialize image_processing
__magic_name__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__magic_name__ , __magic_name__ = prepare_semantic_single_inputs()
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
__magic_name__ = True
__magic_name__ = image_processing(a__ , a__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
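# Hedged end-to-end sketch (my addition; the array shapes are arbitrary and
# do_reduce_labels mirrors the tests above): the processor pairs each image
# with its segmentation map and returns pixel_values plus long-typed labels.
#
#   import numpy as np
#   from transformers import BeitImageProcessor
#
#   processor = BeitImageProcessor(do_reduce_labels=True)
#   image = np.zeros((32, 32, 3), dtype=np.uint8)
#   seg_map = np.zeros((32, 32), dtype=np.uint8)
#   encoding = processor(image, segmentation_maps=seg_map, return_tensors="pt")
#   print(encoding["pixel_values"].shape, encoding["labels"].dtype)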
| 98 | 0 |