from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
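
# TFDistilBertModelTester wires a deliberately tiny config (2 layers, hidden size 32)
# to random inputs so every DistilBert task head can be built and shape-checked
# quickly on CPU, without downloading any pretrained weights.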
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_upernet": ["UperNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
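
# The fast tests below assemble a miniature Kandinsky img2img pipeline (tiny MCLIP
# text encoder, 1-layer UNet, small VQ "movq" decoder) and compare a 3x3 corner of
# the output against a pinned slice, so a regression in any component surfaces as
# a numerical mismatch.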
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
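
# SpeechT5 uses a plain character-level SentencePiece model ("spm_char.model");
# the tokenizer below simply wraps that model and appends </s> to each sequence.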
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """Construct a BertGeneration tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
def is_palindrome(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from math import gcd
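
# Pollard's rho finds a nontrivial factor of a composite number by iterating a
# pseudorandom polynomial and applying Floyd's tortoise-and-hare cycle detection:
# once both walkers fall into the same cycle modulo an unknown factor p, the gcd
# of their difference with ``num`` reveals p.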
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Use Pollard's rho algorithm to return a nontrivial factor of ``num``."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
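
# Same structure as the Kandinsky 2.1 img2img tests above, but the 2.2 decoder
# pipeline is text-free: it consumes precomputed image embeddings instead of a
# prompt, so the dummy components need no text encoder or tokenizer.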
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        # read the image in grayscale and keep a copy of the original
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
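
# These integration tests run real Stable Diffusion checkpoints through the
# k-diffusion samplers ("sample_euler", "sample_dpmpp_2m") and pin a 3x3 corner
# of the generated image against a reference slice.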
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the given string is a dot-decimal IPv4 address, else False."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # an IPv4 address has exactly four octets, each in the range 0-255
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
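
# The first class checks offline/caching behaviour with a mocked HTTP layer; the
# second (staging-only) class round-trips an image processor through the Hub and
# verifies that every attribute survives push_to_hub / from_pretrained.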
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
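
# MNIST is shipped in the IDX format: a big-endian uint32 header whose magic
# number is 2051 for image files and 2049 for label files, followed by the raw
# uint8 pixel or label bytes. The helpers below parse exactly that layout.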
def UpperCamelCase__ ( UpperCAmelCase ) -> str:
"""simple docstring"""
_a : Optional[int] = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCAmelCase )[0]
@deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def UpperCamelCase__ ( UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream:
_a : List[Any] = _readaa(UpperCAmelCase )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
_a : Any = _readaa(UpperCAmelCase )
_a : Optional[int] = _readaa(UpperCAmelCase )
_a : Optional[int] = _readaa(UpperCAmelCase )
_a : Tuple = bytestream.read(rows * cols * num_images )
_a : int = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta )
_a : List[Any] = data.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 1 )
return data
@deprecated(UpperCAmelCase , '''Please use tf.one_hot on tensors.''' )
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
_a : List[Any] = labels_dense.shape[0]
_a : List[str] = numpy.arange(UpperCAmelCase ) * num_classes
_a : List[str] = numpy.zeros((num_labels, num_classes) )
_a : Dict = 1
return labels_one_hot
@deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=10 ) -> str:
"""simple docstring"""
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream:
_a : List[Any] = _readaa(UpperCAmelCase )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
_a : str = _readaa(UpperCAmelCase )
_a : Dict = bytestream.read(UpperCAmelCase )
_a : List[str] = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCAmelCase , UpperCAmelCase )
return labels
class UpperCamelCase_ :
@deprecated(
lowercase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self , lowercase , lowercase , lowercase=False , lowercase=False , lowercase=dtypes.floataa , lowercase=True , lowercase=None , ) -> Dict:
_a , _a : int = random_seed.get_seed(lowercase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_a : str = dtypes.as_dtype(lowercase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
_a : int = 10_000
_a : List[str] = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
_a : Dict = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_a : Union[str, Any] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
_a : List[str] = images.astype(numpy.floataa )
_a : Any = numpy.multiply(lowercase , 1.0 / 255.0 )
_a : Any = images
_a : Tuple = labels
_a : str = 0
_a : Dict = 0
@property
def snake_case__( self ) -> Tuple:
return self._images
@property
def snake_case__( self ) -> Optional[Any]:
return self._labels
@property
def snake_case__( self ) -> Tuple:
return self._num_examples
@property
def snake_case__( self ) -> Optional[Any]:
return self._epochs_completed
def snake_case__( self , lowercase , lowercase=False , lowercase=True ) -> int:
if fake_data:
_a : Optional[Any] = [1] * 784
_a : Optional[int] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(lowercase )],
[fake_label for _ in range(lowercase )],
)
_a : Union[str, Any] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_a : Tuple = numpy.arange(self._num_examples )
numpy.random.shuffle(lowercase )
_a : Any = self.images[perma]
_a : Union[str, Any] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
_a : Any = self._num_examples - start
_a : Any = self._images[start : self._num_examples]
_a : Optional[Any] = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_a : Tuple = numpy.arange(self._num_examples )
numpy.random.shuffle(lowercase )
_a : Union[str, Any] = self.images[perm]
_a : str = self.labels[perm]
# Start next epoch
_a : List[Any] = 0
_a : Optional[int] = batch_size - rest_num_examples
_a : int = self._index_in_epoch
_a : str = self._images[start:end]
_a : Union[str, Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_a : Optional[int] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(UpperCAmelCase , '''Please write your own downloading logic.''' )
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
if not gfile.Exists(UpperCAmelCase ):
gfile.MakeDirs(UpperCAmelCase )
_a : Tuple = os.path.join(UpperCAmelCase , UpperCAmelCase )
if not gfile.Exists(UpperCAmelCase ):
urllib.request.urlretrieve(UpperCAmelCase , UpperCAmelCase ) # noqa: S310
with gfile.GFile(UpperCAmelCase ) as f:
_a : Union[str, Any] = f.size()
print('''Successfully downloaded''' , UpperCAmelCase , UpperCAmelCase , '''bytes.''' )
return filepath
@deprecated(
UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=dtypes.floataa , UpperCAmelCase=True , UpperCAmelCase=5000 , UpperCAmelCase=None , UpperCAmelCase=DEFAULT_SOURCE_URL , ) -> Any:
"""simple docstring"""
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=UpperCAmelCase , one_hot=UpperCAmelCase , dtype=UpperCAmelCase , seed=UpperCAmelCase )
_a : Dict = fake()
_a : Union[str, Any] = fake()
_a : Any = fake()
return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
if not source_url: # empty string check
_a : List[str] = DEFAULT_SOURCE_URL
_a : int = '''train-images-idx3-ubyte.gz'''
_a : Optional[int] = '''train-labels-idx1-ubyte.gz'''
_a : Union[str, Any] = '''t10k-images-idx3-ubyte.gz'''
_a : int = '''t10k-labels-idx1-ubyte.gz'''
_a : Tuple = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + train_images_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
_a : Union[str, Any] = _extract_images(UpperCAmelCase )
_a : Dict = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
_a : Optional[int] = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase )
_a : str = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + test_images_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
_a : Any = _extract_images(UpperCAmelCase )
_a : Optional[Any] = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
_a : Tuple = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase )
if not 0 <= validation_size <= len(UpperCAmelCase ):
_a : Union[str, Any] = (
'''Validation size should be between 0 and '''
F'{len(UpperCAmelCase )}. Received: {validation_size}.'
)
raise ValueError(UpperCAmelCase )
_a : str = train_images[:validation_size]
_a : List[Any] = train_labels[:validation_size]
_a : Union[str, Any] = train_images[validation_size:]
_a : List[str] = train_labels[validation_size:]
_a : Tuple = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
_a : Optional[Any] = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
_a : Union[str, Any] = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
_a : Union[str, Any] = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
    return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
| 307 | 0 |
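# Editor's note: a minimal, self-contained sketch (illustrative only) of the preprocessing
# performed above -- reshape [num, rows, cols, 1] -> [num, rows*cols] and rescale
# [0, 255] -> [0.0, 1.0]. The toy shapes below are assumptions, not part of the original.
import numpy as np

raw = np.random.randint(0, 256, size=(4, 28, 28, 1), dtype=np.uint8)  # fake image batch
flat = raw.reshape(raw.shape[0], raw.shape[1] * raw.shape[2])         # -> (4, 784)
scaled = np.multiply(flat.astype(np.float32), 1.0 / 255.0)            # values in [0.0, 1.0]
assert scaled.min() >= 0.0 and scaled.max() <= 1.0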
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = ShapEPipeline
lowerCamelCase__ = ['prompt']
lowerCamelCase__ = ['prompt']
lowerCamelCase__ = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowerCamelCase__ = False
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return 32
@property
def snake_case__ ( self):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case__ ( self):
'''simple docstring'''
return 8
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(__a)
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_lowerCAmelCase : List[Any] = PriorTransformer(**__a)
return model
@property
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : Optional[Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : str = ShapERenderer(**__a)
return model
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.dummy_prior
_lowerCAmelCase : List[Any] = self.dummy_text_encoder
_lowerCAmelCase : int = self.dummy_tokenizer
_lowerCAmelCase : Union[str, Any] = self.dummy_renderer
_lowerCAmelCase : Optional[int] = HeunDiscreteScheduler(
beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=__a, clip_sample=__a, clip_sample_range=1.0, )
_lowerCAmelCase : Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def snake_case__ ( self, __a, __a=0):
'''simple docstring'''
if str(__a).startswith("mps"):
_lowerCAmelCase : Tuple = torch.manual_seed(__a)
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=__a).manual_seed(__a)
_lowerCAmelCase : Any = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = "cpu"
_lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase : Union[str, Any] = self.pipeline_class(**__a)
_lowerCAmelCase : int = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : List[str] = pipe(**self.get_dummy_inputs(__a))
_lowerCAmelCase : List[Any] = output.images[0]
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Any = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def snake_case__ ( self):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = torch_device == "cpu"
_lowerCAmelCase : Optional[int] = True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=__a, relax_max_difference=__a, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : List[str] = self.pipeline_class(**__a)
_lowerCAmelCase : List[Any] = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(__a)
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : List[Any] = batch_size * [inputs[key]]
_lowerCAmelCase : Optional[int] = pipe(**__a, num_images_per_prompt=__a)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy")
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("openai/shap-e")
_lowerCAmelCase : Dict = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[int] = torch.Generator(device=__a).manual_seed(0)
_lowerCAmelCase : Optional[Any] = pipe(
"a shark", generator=__a, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a, __a)
| 500 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000 ) -> int:
    """Count hollow square laminae formed with up to `limit` tiles (cf. Project Euler 173)."""
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'''{solution() = }''')
| 500 | 1 |
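# Editor's note: a hedged brute-force cross-check of the counting formula above. It
# enumerates outer/hole widths directly and should agree with solution(limit) for small
# limits; written only for illustration, not part of the original.
def brute_force_laminae(limit: int = 1_000) -> int:
    count = 0
    for outer in range(3, limit):
        for hole in range(outer - 2, 0, -2):  # same parity, border at least one tile thick
            if outer * outer - hole * hole > limit:
                break
            count += 1
    return count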
import numpy as np
def UpperCamelCase_ ( vector ) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
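# Editor's note: quick sanity check (illustrative) that the closed form above,
# 2 / (1 + exp(-2x)) - 1, matches numpy's built-in tanh.
import numpy as np

x = np.linspace(-3.0, 3.0, 7)
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))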
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( root ) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 | 0 |
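# Editor's note: illustrative usage of the distribute-coins routine above -- a root holding
# 3 coins with two empty children needs exactly two moves (one coin pushed to each child).
# Relies on the TreeNode / UpperCamelCase_ definitions restored in the snippet above.
assert UpperCamelCase_(TreeNode(3, TreeNode(0), TreeNode(0))) == 2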
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'longformer'

    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1e-12 , onnx_export: bool = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__( self , config: "PretrainedConfig" , task: str = "default" , patching_specs: "List[PatchingSpec]" = None ):
        super().__init__(config , task , patching_specs )
        # Restored from the public transformers source; the obfuscated original only
        # showed a bare assignment here.
        config.onnx_export = True

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''global_attention_mask''', dynamic_axis),
            ] )

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            # assumed: the default task exposes a pooled output with a dynamic batch axis
            outputs['''pooler_output'''] = {0: '''batch'''}
        return outputs

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4

    @property
    def default_onnx_opset( self ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )

    def generate_dummy_inputs( self , preprocessor: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['''global_attention_mask'''] = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        inputs['''global_attention_mask'''][:, ::2] = 1
        return inputs
| 33 |
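# Editor's note: an illustrative sketch of the dummy-input trick used above -- a
# global_attention_mask of zeros with every second token made global. The (2, 8)
# shape is an assumption chosen for the example.
import torch

input_ids = torch.ones(2, 8, dtype=torch.long)   # (batch, sequence) stand-in
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1                # make every second token global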
'''simple docstring'''
def _lowercase ( number: int ):
    """Return the largest number obtainable by deleting exactly one digit of ``number``."""
    if not isinstance(number , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_str = str(abs(number ) )
        num_transpositions = [list(num_str ) for _ in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 131 | 0 |
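# Editor's note: worked example (illustrative) of the digit-deletion maximum computed
# above: for 1253 the one-digit deletions are 253, 153, 123 and 125, so the answer is 253.
digits = "1253"
candidates = [int(digits[:i] + digits[i + 1 :]) for i in range(len(digits))]
assert max(candidates) == 253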
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int ) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes


def solution(ceiling: int = 1_000_000 ) -> int:
    '''simple docstring'''
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0

    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
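# Editor's note: illustrative check of the idea above -- the longest run of consecutive
# primes summing to a prime below 100 starts at 2: 2 + 3 + 5 + 7 + 11 + 13 = 41 (prime),
# so solution(100) should return 41.
assert sum([2, 3, 5, 7, 11, 13]) == 41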
| 717 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowercase :
def __init__( self : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any=14 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=99 , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> List[Any]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_token_type_ids
lowercase_ = use_input_mask
lowercase_ = use_labels
lowercase_ = use_mc_token_ids
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
lowercase_ = self.vocab_size - 1
def __UpperCAmelCase ( self : Tuple) -> int:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length])
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase_ = None
if self.use_mc_token_ids:
lowercase_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length)
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase_ = ids_tensor([self.batch_size] , self.num_choices)
lowercase_ = self.get_config()
lowercase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __UpperCAmelCase ( self : Union[str, Any]) -> int:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , *__lowerCAmelCase : Tuple) -> List[str]:
lowercase_ = CTRLModel(config=__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , head_mask=__lowerCAmelCase)
model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase)
lowercase_ = model(__lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values) , config.n_layer)
def __UpperCAmelCase ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict , *__lowerCAmelCase : int) -> int:
lowercase_ = CTRLLMHeadModel(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowercase_ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCAmelCase ( self : Dict) -> int:
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def __UpperCAmelCase ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , *__lowerCAmelCase : List[Any]) -> int:
lowercase_ = self.num_labels
lowercase_ = CTRLForSequenceClassification(__lowerCAmelCase)
model.to(__lowerCAmelCase)
model.eval()
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase_ = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
@require_torch
class lowercase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
lowerCamelCase_ =(CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCamelCase_ =(CTRLLMHeadModel,) if is_torch_available() else ()
lowerCamelCase_ =(
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ =True
lowerCamelCase_ =False
lowerCamelCase_ =False
def __UpperCAmelCase ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int) -> Optional[int]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __UpperCAmelCase ( self : str) -> Tuple:
lowercase_ = CTRLModelTester(self)
lowercase_ = ConfigTester(self , config_class=__lowerCAmelCase , n_embd=37)
def __UpperCAmelCase ( self : Dict) -> Optional[Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : str) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Any) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__lowerCAmelCase)
def __UpperCAmelCase ( self : Tuple) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__lowerCAmelCase)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
pass
@slow
def __UpperCAmelCase ( self : Dict) -> List[str]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = CTRLModel.from_pretrained(__lowerCAmelCase)
self.assertIsNotNone(__lowerCAmelCase)
@unittest.skip("The model doesn't support left padding") # and it's not used enough to be worth fixing :)
def __UpperCAmelCase ( self : Optional[Any]) -> Dict:
pass
@require_torch
class lowercase ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int]) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __UpperCAmelCase ( self : int) -> Any:
lowercase_ = CTRLLMHeadModel.from_pretrained("ctrl")
model.to(__lowerCAmelCase)
lowercase_ = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=__lowerCAmelCase) # Legal the president is
lowercase_ = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowercase_ = model.generate(__lowerCAmelCase , do_sample=__lowerCAmelCase)
self.assertListEqual(output_ids[0].tolist() , __lowerCAmelCase)
| 461 | 0 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase__ ( snake_case__):
"""simple docstring"""
__UpperCAmelCase = (DPMSolverSDEScheduler,)
__UpperCAmelCase = 10
def a__ ( self : Optional[Any] , **UpperCamelCase_ : Tuple ):
'''simple docstring'''
__magic_name__ = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**lowerCAmelCase__ )
return config
def a__ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def a__ ( self : str ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def a__ ( self : List[str] ):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def a__ ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def a__ ( self : Optional[Any] ):
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
__magic_name__ = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = model(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(lowerCAmelCase__ ) )
__magic_name__ = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1e-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1e-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3
def a__ ( self : str ):
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
__magic_name__ = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = model(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(lowerCAmelCase__ ) )
__magic_name__ = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1e-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1e-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1e-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1e-3
def a__ ( self : int ):
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter.to(lowerCAmelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__magic_name__ = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = model(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(lowerCAmelCase__ ) )
__magic_name__ = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1e-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1e-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1e-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1e-3
def a__ ( self : Tuple ):
'''simple docstring'''
__magic_name__ = self.scheduler_classes[0]
__magic_name__ = self.get_scheduler_config()
__magic_name__ = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__magic_name__ = self.dummy_model()
__magic_name__ = self.dummy_sample_deter.to(lowerCAmelCase__ ) * scheduler.init_noise_sigma
__magic_name__ = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
__magic_name__ = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = model(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ = output.prev_sample
__magic_name__ = torch.sum(torch.abs(lowerCAmelCase__ ) )
__magic_name__ = torch.mean(torch.abs(lowerCAmelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1e-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1e-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1e-2
            assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1e-2
| 545 |
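# Editor's note: a hedged sketch of the denoising loop the tests above exercise, with a
# zero "noise prediction" standing in for a trained model; real use pairs the scheduler
# with a UNet, and DPMSolverSDEScheduler additionally requires the torchsde package.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample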
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowercase = '''src/diffusers'''
_lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase = spec.loader.load_module()
def UpperCamelCase ( snake_case__ , snake_case__):
return line.startswith(snake_case__) or len(snake_case__) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , snake_case__) is not None
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Tuple = object_name.split(".")
lowerCAmelCase_ : Union[str, Any] = 0
# First let's find the module where our object lives.
lowerCAmelCase_ : Union[str, Any] = parts[i]
while i < len(snake_case__) and not os.path.isfile(os.path.join(snake_case__ , F'''{module}.py''')):
i += 1
if i < len(snake_case__):
lowerCAmelCase_ : Dict = os.path.join(snake_case__ , parts[i])
if i >= len(snake_case__):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')
with open(os.path.join(snake_case__ , F'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
lowerCAmelCase_ : Optional[Any] = f.readlines()
# Now let's find the class / func in the code!
lowerCAmelCase_ : Union[str, Any] = ""
lowerCAmelCase_ : int = 0
for name in parts[i + 1 :]:
while (
line_index < len(snake_case__) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(snake_case__):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''')
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowerCAmelCase_ : Union[str, Any] = line_index
while line_index < len(snake_case__) and _should_continue(lines[line_index] , snake_case__):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
lowerCAmelCase_ : List[str] = lines[start_index:line_index]
return "".join(snake_case__)
_lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Any = code.split("\n")
lowerCAmelCase_ : Any = 0
while idx < len(snake_case__) and len(lines[idx]) == 0:
idx += 1
if idx < len(snake_case__):
return re.search(R"^(\s*)\S" , lines[idx]).groups()[0]
return ""
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Dict = len(get_indent(snake_case__)) > 0
if has_indent:
lowerCAmelCase_ : Dict = F'''class Bla:\n{code}'''
    lowerCAmelCase_ : Optional[int] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 , preview=snake_case__)
lowerCAmelCase_ : Optional[Any] = black.format_str(snake_case__ , mode=snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = style_docstrings_in_code(snake_case__)
return result[len("class Bla:\n") :] if has_indent else result
def UpperCamelCase ( snake_case__ , snake_case__=False):
with open(snake_case__ , "r" , encoding="utf-8" , newline="\n") as f:
lowerCAmelCase_ : Tuple = f.readlines()
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : Union[str, Any] = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(snake_case__):
lowerCAmelCase_ : Optional[int] = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = search.groups()
lowerCAmelCase_ : int = find_code_in_diffusers(snake_case__)
lowerCAmelCase_ : Dict = get_indent(snake_case__)
lowerCAmelCase_ : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
lowerCAmelCase_ : str = theoretical_indent
lowerCAmelCase_ : Union[str, Any] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
lowerCAmelCase_ : Optional[int] = True
while line_index < len(snake_case__) and should_continue:
line_index += 1
if line_index >= len(snake_case__):
break
lowerCAmelCase_ : Dict = lines[line_index]
lowerCAmelCase_ : List[str] = _should_continue(snake_case__ , snake_case__) and re.search(F'''^{indent}# End copy''' , snake_case__) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
lowerCAmelCase_ : Dict = lines[start_index:line_index]
lowerCAmelCase_ : Optional[int] = "".join(snake_case__)
# Remove any nested `Copied from` comments to avoid circular copies
lowerCAmelCase_ : List[Any] = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(snake_case__) is None]
lowerCAmelCase_ : Optional[Any] = "\n".join(snake_case__)
# Before comparing, use the `replace_pattern` on the original code.
if len(snake_case__) > 0:
lowerCAmelCase_ : List[str] = replace_pattern.replace("with" , "").split(",")
lowerCAmelCase_ : Tuple = [_re_replace_pattern.search(snake_case__) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = pattern.groups()
lowerCAmelCase_ : int = re.sub(snake_case__ , snake_case__ , snake_case__)
if option.strip() == "all-casing":
lowerCAmelCase_ : List[str] = re.sub(obja.lower() , obja.lower() , snake_case__)
lowerCAmelCase_ : int = re.sub(obja.upper() , obja.upper() , snake_case__)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowerCAmelCase_ : List[Any] = blackify(lines[start_index - 1] + theoretical_code)
lowerCAmelCase_ : Union[str, Any] = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
lowerCAmelCase_ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowerCAmelCase_ : Union[str, Any] = start_index + 1
if overwrite and len(snake_case__) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''')
with open(snake_case__ , "w" , encoding="utf-8" , newline="\n") as f:
f.writelines(snake_case__)
return diffs
def UpperCamelCase ( snake_case__ = False):
lowerCAmelCase_ : Tuple = glob.glob(os.path.join(snake_case__ , "**/*.py") , recursive=snake_case__)
lowerCAmelCase_ : int = []
for filename in all_files:
lowerCAmelCase_ : Union[str, Any] = is_copy_consistent(snake_case__ , snake_case__)
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(snake_case__) > 0:
lowerCAmelCase_ : Optional[Any] = "\n".join(snake_case__)
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 659 | 0 |
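# Editor's note: illustrative match against the `# Copied from` pattern defined above
# (the regex is re-declared here so the example is self-contained).
import re

copy_re = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
line = "    # Copied from diffusers.models.attention.Attention with Attention->CrossAttention"
indent, object_name, replace_pattern = copy_re.search(line).groups()
assert object_name == "models.attention.Attention"
assert replace_pattern.startswith("with ")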
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase ( resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    """Solve Z**2 = R**2 + X**2 for whichever of the three quantities is passed as 0."""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 146 |
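# Editor's note: worked example (3-4-5 right triangle of R, X, Z) for the function above:
# with resistance 3 and reactance 4, the impedance is sqrt(3**2 + 4**2) = 5.
from math import sqrt

assert sqrt(pow(3, 2) + pow(4, 2)) == 5.0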
def ugly_numbers ( n: int ) -> int:
    """Return the n-th ugly number (positive integers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 146 | 1 |
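# Editor's note: a hedged cross-check of ugly_numbers above against a naive filter --
# the first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so ugly_numbers(10) == 12.
def is_ugly(k: int) -> bool:
    for p in (2, 3, 5):
        while k % p == 0:
            k //= p
    return k == 1

assert [k for k in range(1, 13) if is_ugly(k)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]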
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 66 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=True ,UpperCAmelCase="pt" ):
'''simple docstring'''
A__ = {'add_prefix_space': True} if isinstance(UpperCAmelCase ,UpperCAmelCase ) and not line.startswith(' ' ) else {}
A__ = padding_side
return tokenizer(
[line] ,max_length=UpperCAmelCase ,padding='max_length' if pad_to_max_length else None ,truncation=UpperCAmelCase ,return_tensors=UpperCAmelCase ,add_special_tokens=UpperCAmelCase ,**UpperCAmelCase ,)
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=None ,):
'''simple docstring'''
A__ = input_ids.ne(UpperCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _snake_case( UpperCAmelCase ):
def __init__(self : List[str] , a : Any , a : Union[str, Any] , a : int , a : Optional[int] , a : Optional[Any]="train" , a : Optional[int]=None , a : Tuple=None , a : str=None , a : Dict="" , ) -> Dict:
"""simple docstring"""
super().__init__()
A__ = Path(a ).joinpath(type_path + '.source' )
A__ = Path(a ).joinpath(type_path + '.target' )
A__ = self.get_char_lens(self.src_file )
A__ = max_source_length
A__ = max_target_length
assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
A__ = tokenizer
A__ = prefix
if n_obs is not None:
A__ = self.src_lens[:n_obs]
A__ = src_lang
A__ = tgt_lang
def __len__(self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return len(self.src_lens )
def __getitem__(self : Any , a : str ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
A__ = index + 1 # linecache starts at 1
A__ = self.prefix + linecache.getline(str(self.src_file ) , a ).rstrip('\n' )
A__ = linecache.getline(str(self.tgt_file ) , a ).rstrip('\n' )
assert source_line, f"""empty source line for index {index}"""
assert tgt_line, f"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A__ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , a ) else self.tokenizer
)
A__ = self.tokenizer.generator if isinstance(self.tokenizer , a ) else self.tokenizer
A__ = encode_line(a , a , self.max_source_length , 'right' )
A__ = encode_line(a , a , self.max_target_length , 'right' )
A__ = source_inputs['input_ids'].squeeze()
A__ = target_inputs['input_ids'].squeeze()
A__ = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _UpperCamelCase (a : Tuple ) -> Tuple:
"""simple docstring"""
return [len(a ) for x in Path(a ).open().readlines()]
def _UpperCamelCase (self : str , a : int ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
A__ = torch.stack([x['input_ids'] for x in batch] )
A__ = torch.stack([x['attention_mask'] for x in batch] )
A__ = torch.stack([x['decoder_input_ids'] for x in batch] )
A__ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
A__ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , a )
else self.tokenizer.pad_token_id
)
A__ = trim_batch(a , a )
A__ , A__ = trim_batch(a , a , attention_mask=a )
A__ = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
lowerCAmelCase_ = getLogger(__name__)
def _A ( UpperCAmelCase ):
'''simple docstring'''
return list(itertools.chain.from_iterable(UpperCAmelCase ) )
def _A ( UpperCAmelCase ):
'''simple docstring'''
A__ = get_git_info()
save_json(UpperCAmelCase ,os.path.join(UpperCAmelCase ,'git_log.json' ) )
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=4 ,**UpperCAmelCase ):
'''simple docstring'''
with open(UpperCAmelCase ,'w' ) as f:
json.dump(UpperCAmelCase ,UpperCAmelCase ,indent=UpperCAmelCase ,**UpperCAmelCase )
def _A ( UpperCAmelCase ):
'''simple docstring'''
with open(UpperCAmelCase ) as f:
return json.load(UpperCAmelCase )
def _A ( ):
'''simple docstring'''
A__ = git.Repo(search_parent_directories=UpperCAmelCase )
A__ = {
'repo_id': str(UpperCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def _A ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
return list(map(UpperCAmelCase ,UpperCAmelCase ) )
def _A ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
with open(UpperCAmelCase ,'wb' ) as f:
return pickle.dump(UpperCAmelCase ,UpperCAmelCase )
def _A ( UpperCAmelCase ):
'''simple docstring'''
def remove_articles(UpperCAmelCase ):
return re.sub(r'\b(a|an|the)\b' ,' ' ,UpperCAmelCase )
def white_space_fix(UpperCAmelCase ):
return " ".join(text.split() )
def remove_punc(UpperCAmelCase ):
A__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCAmelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase ) ) ) )
def _A ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
A__ = normalize_answer(UpperCAmelCase ).split()
A__ = normalize_answer(UpperCAmelCase ).split()
A__ = Counter(UpperCAmelCase ) & Counter(UpperCAmelCase )
A__ = sum(common.values() )
if num_same == 0:
return 0
A__ = 1.0 * num_same / len(UpperCAmelCase )
A__ = 1.0 * num_same / len(UpperCAmelCase )
A__ = (2 * precision * recall) / (precision + recall)
return fa
def _A ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
return normalize_answer(UpperCAmelCase ) == normalize_answer(UpperCAmelCase )
def _A ( UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
assert len(UpperCAmelCase ) == len(UpperCAmelCase )
A__ = 0
for hypo, pred in zip(UpperCAmelCase ,UpperCAmelCase ):
em += exact_match_score(UpperCAmelCase ,UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
em /= len(UpperCAmelCase )
return {"em": em}
def _A ( UpperCAmelCase ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def _A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ):
'''simple docstring'''
A__ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A__ = 'dropout_rate'
for p in extra_params:
if getattr(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ):
if not hasattr(UpperCAmelCase ,UpperCAmelCase ) and not hasattr(UpperCAmelCase ,equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(UpperCAmelCase ) )
delattr(UpperCAmelCase ,UpperCAmelCase )
continue
A__ = p if hasattr(UpperCAmelCase ,UpperCAmelCase ) else equivalent_param[p]
setattr(UpperCAmelCase ,UpperCAmelCase ,getattr(UpperCAmelCase ,UpperCAmelCase ) )
delattr(UpperCAmelCase ,UpperCAmelCase )
return hparams, config
| 531 | 0 |
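# Editor's note: a self-contained sketch of the token-level F1 computed above (the
# snippet's placeholder names obscure it); normalization is simplified here to
# lowercase + whitespace split, which is an assumption of the example.
from collections import Counter

def f1(pred: str, gold: str) -> float:
    p, g = pred.lower().split(), gold.lower().split()
    num_same = sum((Counter(p) & Counter(g)).values())
    if num_same == 0:
        return 0.0
    precision, recall = num_same / len(p), num_same / len(g)
    return 2 * precision * recall / (precision + recall)

assert abs(f1("the cat sat", "a cat sat") - 2 / 3) < 1e-9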
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__A = '''
Human: <<task>>
Assistant: '''
DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def lowercase_ ( prompt_or_repo_id , agent_name , mode="run" ) -> str:
    '''simple docstring'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(prompt_file , "r" , encoding="utf-8" ) as f:
        return f.read()
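# Editor's note: the whitespace heuristic above treats any string containing whitespace
# as a literal prompt rather than a repo id; a quick illustration:
import re

assert re.search("\\s", "huggingface-tools/default-prompts") is None  # looks like a repo id
assert re.search("\\s", "Translate this to French") is not None       # literal prompt text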
| 711 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
snake_case__ = ViTImageProcessor if is_vision_available() else None
@property
def lowerCamelCase__ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = (3, 32, 128)
__lowerCamelCase : Optional[int] = tempfile.mkdtemp()
# fmt: off
__lowerCamelCase : Tuple = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
__lowerCamelCase : Dict = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
__lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + "\n" )
__lowerCamelCase : int = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
__lowerCamelCase : Dict = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple , **UpperCAmelCase : Optional[int] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , **UpperCAmelCase : List[str] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Any = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__lowerCamelCase : Union[str, Any] = Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) )
return image_input
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Union[str, Any] = self.get_tokenizer()
__lowerCamelCase : Any = self.get_image_processor()
__lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase : str = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Any = self.get_tokenizer()
__lowerCamelCase : Dict = self.get_image_processor()
__lowerCamelCase : Optional[int] = MgpstrProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]) | 366 | 0 |
"""simple docstring"""
def add(first: int, second: int) -> int:
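    """
    Add two integers using only bitwise operations: `first & second` extracts the
    carry bits and `first ^ second` adds without carrying.

    Illustrative doctests (not from the source):
    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    >>> add(0, 7)
    7
    """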
while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
    print(f"""{add(first, second) = }""") | 174 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast' )}

    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )

        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download )

            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path, checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name )
            logger.info(f"""=> File names {file_names}""" )

            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(f"""=> removing {file_name}""" )
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download) | 174 | 1 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
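# Illustrative walk-through (not from the source): a T5X key such as
# "encoder/layers_0/attention/query/kernel" is first rewritten to
# "encoder/block/0/layer/attention/query/kernel" by the layers_(\d+) rule in
# rename_keys below, after which this table maps "/attention/" -> "/0/SelfAttention/"
# and "query" -> "q".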
def rename_keys(s_dict):
    """simple docstring"""
    # 1. rewrite T5X layer indices into HF block/layer indices
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = r'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_to_block_of_layer, key ):
            new_key = re.sub(r'layers_(\d+)', r'block/\1/layer', new_key )

        layer_to_block_of_layer = r'(encoder|decoder)\/'

        if re.match(layer_to_block_of_layer, key ):
            groups = re.match(layer_to_block_of_layer, new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r'/mlp/', r'/1/mlp/', new_key )
                new_key = re.sub(r'/pre_mlp_layer_norm/', r'/1/layer_norm/', new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(r'/mlp/', r'/2/mlp/', new_key )
                new_key = re.sub(r'/pre_mlp_layer_norm/', r'/2/layer_norm/', new_key )

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key )

        print(f'''{key} -> {new_key}''' )
        s_dict[new_key] = s_dict.pop(key )

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'] = s_dict[
            'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'] = s_dict[
            'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace('expert/', f"experts/expert_{idx}/" )] = expert_weights[idx]
                print(f'''{key} -> {key.replace('expert/', f"experts/expert_{idx}/" )}''' )
            s_dict.pop(key )
    return s_dict
GIN_TO_CONFIG_MAPPING = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config(gin_file, num_experts):
    """simple docstring"""
    import regex as re

    with open(gin_file, 'r' ) as f:
        raw_gin = f.read()

    regex_match = re.findall(r'(.*) = ([0-9.]*)', raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '.' in value else int(value )

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )

    args['num_experts'] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
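# Illustrative example (hypothetical gin excerpt, not from the source): a line
# "NUM_HEADS = 12" matches r"(.*) = ([0-9.]*)" and is stored as args["num_heads"] = 12,
# while "dense.MlpBlock.activations = ('gelu',)" ends up as args["feed_forward_proj"] = "gelu".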
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    """simple docstring"""
    print(f'''Loading flax weights from : {flax_checkpoint_path}''' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )

    pt_model = SwitchTransformersForConditionalGeneration(config )

    flax_params = flax_params['target']
    flax_params = flatten_dict(flax_params, sep='/' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params, sep='/' )

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params )

    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 283 | """simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression = None  # compression type in fsspec. ex: "gzip"
    extension = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : str , lowerCamelCase_ : str = "" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[dict] = None , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
super().__init__(self , **lowerCamelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_lowercase : Union[str, Any] = fsspec.open(
lowerCamelCase_ , mode='rb' , protocol=lowerCamelCase_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('::' )[0] )
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('.' )]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
    def _strip_protocol(cls, path):
        """simple docstring"""
        return super()._strip_protocol(path ).lstrip('/' )
    def _get_dirs(self):
        """simple docstring"""
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}
    def cat(self, path: str):
        """simple docstring"""
        return self.file.open().read()
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , lowerCamelCase_ : str=None , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
_lowercase : Union[str, Any] = self._strip_protocol(lowerCamelCase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[dict] = None , lowerCamelCase_ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase_ : int , ):
"""simple docstring"""
super().__init__(
fo=lowerCamelCase_ , mode=lowerCamelCase_ , target_protocol=lowerCamelCase_ , target_options=lowerCamelCase_ , block_size=lowerCamelCase_ , **lowerCamelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                """simple docstring"""
                self._file = file_
def __enter__( self : str ):
"""simple docstring"""
self._file.__enter__()
return self
            def __exit__(self, *args, **kwargs):
                """simple docstring"""
                self._file.__exit__(*args, **kwargs )
def __iter__( self : Optional[int] ):
"""simple docstring"""
return iter(self._file )
            def __next__(self):
"""simple docstring"""
return next(self._file )
def __getattr__( self : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
return getattr(self._file , lowerCamelCase_ )
        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs ) )

        self.file.__enter__ = fixed_enter
| 283 | 1 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector ))


def swish(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector )
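# Quick sanity check (worked example, not from the source): sigmoid(0) = 0.5,
# so swish(0) = 0 * 0.5 = 0.0, and swish(x) approaches x for large positive x.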
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation(self):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE = self.rope_scaling.get("type" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = self.rope_scaling.get("factor" , __lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" ) | 16 | 0 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    '''simple docstring'''
    if not isinstance(s, str ):
        raise TypeError("The parameter s type must be str." )

    return [s[i:] + s[:i] for i in range(len(s ) )]
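# Worked example (not from the source): all_rotations("abc") == ["abc", "bca", "cab"].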
def bwt_transform(s: str) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s, str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )

    rotations = all_rotations(s )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
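# Worked example (computed by hand, not from the source): bwt_transform("banana")
# returns {"bwt_string": "nnbaaa", "idx_original_string": 3}.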
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string, str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable"
            " to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )

    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 706 | '''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class __A ( UpperCamelCase__ ):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler )
def __call__(self : Union[str, Any] ):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), )
        timestep = 1

        model_output = self.unet(image, timestep ).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image ).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
return result
| 415 | 0 |
'''simple docstring'''
import os
from collections.abc import Iterator
def lowercase_ ( __A : str = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(__A ):
lowercase : List[Any] =[d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__A )[1] in (".py", ".ipynb"):
yield os.path.join(__A , __A ).lstrip('''./''' )
def md_prefix(i: int) -> str:
    """simple docstring"""
    return f'{i * " "}*' if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    """simple docstring"""
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'{md_prefix(i )} {new_part.replace("_", " " ).title()}' )
    return new_path
def lowercase_ ( __A : str = "." ) -> None:
"""simple docstring"""
lowercase : List[str] =''''''
for filepath in sorted(good_file_paths(__A ) ):
lowercase , lowercase : List[Any] =os.path.split(__A )
if filepath != old_path:
lowercase : List[str] =print_path(__A , __A )
lowercase : List[Any] =(filepath.count(os.sep ) + 1) if filepath else 0
lowercase : str =F'{filepath}/{filename}'.replace(''' ''' , '''%20''' )
lowercase : List[str] =os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(F'{md_prefix(__A )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('.')
| 94 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.9_99, alpha_transform_type="cosine", ) -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
    return torch.tensor(betas, dtype=torch.float32 )
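# Worked example (illustrative): for the "cosine" transform,
# beta_i = min(1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), max_beta),
# so the betas start near 0 and grow smoothly toward max_beta at the end of diffusion.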
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(self, num_train_timesteps: int = 1_0_0_0, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, prediction_type: str = "epsilon", timestep_spacing: str = "linspace", steps_offset: int = 0, ):
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps )
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0 )

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps )
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
@property
    def init_noise_sigma(self):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], ) -> torch.FloatTensor:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None, ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32 )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.float32 )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio )).round().copy().astype(np.float32 )
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        self.log_sigmas = torch.from_numpy(np.log(sigmas ) ).to(device )

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas ) ), sigmas )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1 ).log(), 0.5 ).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )

        if str(device ).startswith("mps" ):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps ).to(device, dtype=torch.float32 )
        else:
            timesteps = torch.from_numpy(timesteps ).to(device )

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol ).to(device, dtype=timesteps.dtype )
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1 ).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps] )

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def sigma_to_t(self, sigma):
        '''simple docstring'''
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1 )

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape )
        return t
@property
    def state_in_first_order(self):
'''simple docstring'''
return self.sample is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep )

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample" )
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ) -> torch.FloatTensor:
        '''simple docstring'''
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )

        step_indices = [self.index_for_timestep(t, schedule_timesteps ) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.config.num_train_timesteps
| 371 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""
    model_type = 'maskformer-swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.0_2, layer_norm_eps=1E-5, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1, len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
| 305 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 305 | 1 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""" )

    ans = nums[0]
    for i in range(1, len(nums ) ):
        num = nums[i]
        ans = max(ans, ans + num, num )

    return ans
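# Illustrative checks (not from the source): max_subsequence_sum([2, 8, 9]) == 19,
# max_subsequence_sum([-1, 0, 1]) == 1, and max_subsequence_sum([-1, -2, -3]) == -1
# (Kadane's rule keeps the best single element when every entry is negative).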
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 17 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    """simple docstring"""
    hf_model.apply_weight_norm()

    hf_model.input_conv.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.input_conv.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.input_conv.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.output_conv.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.output_conv.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.output_conv.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config )

    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'], model, config )

    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()

    model.save_pretrained(pytorch_dump_folder_path )

    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 3_84
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 1_92, 3_84, 7_68]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 1_92, 3_84, 7_68]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [1_28, 2_56, 5_12, 10_24]
        auxiliary_in_channels = 5_12
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [1_92, 3_84, 7_68, 15_36]
        auxiliary_in_channels = 7_68
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [2_56, 5_12, 10_24, 20_48]
        auxiliary_in_channels = 10_24

    # set label information
    num_labels = 1_50
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4'])

    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight'''))
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias'''))
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight'''))
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
])
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
# verify on image
__snake_case = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
__snake_case = Image.open(requests.get(snake_case, stream=snake_case).raw).convert('''RGB''')
__snake_case = SegformerImageProcessor()
__snake_case = processor(snake_case, return_tensors='''pt''').pixel_values
with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
print('''Logits:''', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub")
model.push_to_hub(f"openmmlab/{model_name}")
processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 718 | """simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (self.image_size // self.patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names )
def lowercase ( self : Union[str, Any] ) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case = self._prepare_for_class(A_ , A_ )
__snake_case = model_class(A_ )
@jax.jit
def model_jitted(A_ : Union[str, Any] , **A_ : Union[str, Any] ):
return model(pixel_values=A_ , **A_ )
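# jax.jit traces model_jitted once; the eager run under jax.disable_jit() below must match it shape-for-shape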
with self.subTest('''JIT Enabled''' ):
__snake_case = model_jitted(**A_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__snake_case = model_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase ( self : str ) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def lowercase ( self : str ) -> int:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def lowercase ( self : int ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
__snake_case = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(A_ )
def SCREAMING_SNAKE_CASE ( ):
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_vision
@require_flax
class _A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : str ) -> str:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowercase ( self : Union[str, Any] ) -> List[str]:
__snake_case = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=A_ , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
__snake_case = np.ones((1, 196) , dtype=A_ )
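# 196 = (224 // 16) ** 2 patch positions for this patch-16, 224x224 checkpoint; every patch is masked here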
# forward pass
__snake_case = model(pixel_values=A_ , bool_masked_pos=A_ )
__snake_case = outputs.logits
# verify the logits
__snake_case = (1, 196, 8_192)
self.assertEqual(logits.shape , A_ )
__snake_case = np.array(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1E-2 ) )
@slow
def lowercase ( self : List[Any] ) -> List[str]:
__snake_case = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=A_ , return_tensors='''np''' )
# forward pass
__snake_case = model(**A_ )
__snake_case = outputs.logits
# verify the logits
__snake_case = (1, 1_000)
self.assertEqual(logits.shape , A_ )
__snake_case = np.array([-1.23_85, -1.09_87, -1.01_08] )
self.assertTrue(np.allclose(logits[0, :3] , A_ , atol=1E-4 ) )
__snake_case = 281
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def lowercase ( self : int ) -> str:
__snake_case = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=A_ , return_tensors='''np''' )
# forward pass
__snake_case = model(**A_ )
__snake_case = outputs.logits
# verify the logits
__snake_case = (1, 21_841)
self.assertEqual(logits.shape , A_ )
__snake_case = np.array([1.68_81, -0.27_87, 0.59_01] )
self.assertTrue(np.allclose(logits[0, :3] , A_ , atol=1E-4 ) )
__snake_case = 2_396
self.assertEqual(logits.argmax(-1 ).item() , A_ )
| 93 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files', [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
], )
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : Optional[int] ):
__lowerCAmelCase = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md', 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md', 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json', 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
__lowerCAmelCase = DatasetInfosDict.from_directory(lowerCAmelCase_ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info', [
DatasetInfo(),
DatasetInfo(
description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=42, ),
], )
def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : DatasetInfo ):
__lowerCAmelCase = str(lowerCAmelCase_ )
dataset_info.write_to_directory(lowerCAmelCase_ )
__lowerCAmelCase = DatasetInfo.from_directory(lowerCAmelCase_ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowerCAmelCase_, 'dataset_info.json' ) )
def a_ ( ):
__lowerCAmelCase = DatasetInfo(
description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
__lowerCAmelCase = dataset_info._to_yaml_dict()
assert sorted(lowerCAmelCase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
__lowerCAmelCase = yaml.safe_dump(lowerCAmelCase_ )
__lowerCAmelCase = yaml.safe_load(lowerCAmelCase_ )
assert dataset_info_yaml_dict == reloaded
def a_ ( ):
__lowerCAmelCase = DatasetInfo()
__lowerCAmelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict', [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=42, )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
], )
def a_ ( lowerCAmelCase_ : int, lowerCAmelCase_ : DatasetInfosDict ):
__lowerCAmelCase = str(lowerCAmelCase_ )
dataset_infos_dict.write_to_directory(lowerCAmelCase_ )
__lowerCAmelCase = DatasetInfosDict.from_directory(lowerCAmelCase_ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
__lowerCAmelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
__lowerCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCAmelCase_, 'README.md' ) )
| 53 |
def solution(n: int = 200_0000) -> int:
    """Return the sum of all primes strictly below ``n`` (default: two million)."""
    # Sieve of Eratosthenes: primality_list[i] == 0 means i is still considered prime.
    primality_list = [0 for _ in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # start marking at i * i: smaller multiples were already marked by smaller primes
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
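# quick sanity check of the sieve above (pure arithmetic, not part of the original script):
# solution(10) sums the primes below 10, i.e. 2 + 3 + 5 + 7 == 17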
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {
"shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __snake_case ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """dinat"""
lowerCAmelCase__ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , A : int=4 , A : List[Any]=3 , A : str=64 , A : Union[str, Any]=[3, 4, 6, 5] , A : Any=[2, 4, 8, 16] , A : Optional[Any]=7 , A : int=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , A : Optional[Any]=3.0 , A : Union[str, Any]=True , A : Union[str, Any]=0.0 , A : str=0.0 , A : Tuple=0.1 , A : Optional[int]="gelu" , A : Union[str, Any]=0.02 , A : Any=1E-5 , A : List[Any]=0.0 , A : Any=None , A : str=None , **A : List[str] , ):
super().__init__(**A )
__snake_case: str = patch_size
__snake_case: Tuple = num_channels
__snake_case: List[Any] = embed_dim
__snake_case: str = depths
__snake_case: str = len(A )
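# the number of stages is derived from the length of the depths list; the attribute_map above exposes it as num_hidden_layers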
__snake_case: int = num_heads
__snake_case: Optional[int] = kernel_size
__snake_case: Dict = dilations
__snake_case: Optional[Any] = mlp_ratio
__snake_case: str = qkv_bias
__snake_case: Optional[Any] = hidden_dropout_prob
__snake_case: Tuple = attention_probs_dropout_prob
__snake_case: Optional[int] = drop_path_rate
__snake_case: Optional[int] = hidden_act
__snake_case: Union[str, Any] = layer_norm_eps
__snake_case: int = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case: List[str] = int(embed_dim * 2 ** (len(A ) - 1) )
__snake_case: Tuple = layer_scale_init_value
__snake_case: str = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(A ) + 1 )]
__snake_case: Union[str, Any] = get_aligned_output_features_output_indices(
out_features=A , out_indices=A , stage_names=self.stage_names )
| 718 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = RoCBertTokenizer
lowerCAmelCase__ = None
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = filter_non_english
def UpperCAmelCase__ ( self : List[str] ):
super().setUp()
__snake_case: Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
__snake_case: List[str] = {}
__snake_case: Union[str, Any] = {}
for i, value in enumerate(A ):
__snake_case: Any = i
__snake_case: List[str] = i
__snake_case: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
__snake_case: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
json.dump(A , A , ensure_ascii=A )
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(A , A , ensure_ascii=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__snake_case: Any = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(A , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A ) , [5, 6, 2, 5, 7, 8] )
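# the token, shape and pronunciation vocabularies were built with identical index maps in setUp,
# so all three id sequences coincide here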
def UpperCAmelCase__ ( self : str ):
__snake_case: str = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[Any] = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: str = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Any = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[Any] = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Dict = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = RoCBertBasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = RoCBertBasicTokenizer(do_lower_case=A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__snake_case: List[str] = {}
for i, token in enumerate(A ):
__snake_case: List[str] = i
__snake_case: Optional[int] = RoCBertWordpieceTokenizer(vocab=A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCAmelCase__ ( self : Tuple ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCAmelCase__ ( self : str ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCAmelCase__ ( self : Dict ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
__snake_case: Optional[int] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def UpperCAmelCase__ ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: str = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Tuple = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__snake_case: Union[str, Any] = tokenizer_r.encode_plus(
A , return_attention_mask=A , return_token_type_ids=A , return_offsets_mapping=A , add_special_tokens=A , )
__snake_case: Optional[int] = tokenizer_r.do_lower_case if hasattr(A , """do_lower_case""" ) else False
__snake_case: Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = ["""的""", """人""", """有"""]
__snake_case: List[str] = """""".join(A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = True
__snake_case: List[str] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: Tuple = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Dict = tokenizer_p.encode(A , add_special_tokens=A )
__snake_case: int = tokenizer_r.encode(A , add_special_tokens=A )
__snake_case: int = tokenizer_r.convert_ids_to_tokens(A )
__snake_case: Any = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A , A )
self.assertListEqual(A , A )
__snake_case: Union[str, Any] = False
__snake_case: List[Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Optional[int] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: Optional[Any] = tokenizer_r.encode(A , add_special_tokens=A )
__snake_case: List[Any] = tokenizer_p.encode(A , add_special_tokens=A )
__snake_case: Any = tokenizer_r.convert_ids_to_tokens(A )
__snake_case: Optional[Any] = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that only the first Chinese character is not preceded by "##".
__snake_case: List[Any] = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(A )
]
self.assertListEqual(A , A )
self.assertListEqual(A , A )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Union[str, Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__snake_case: str = tokenizer.encode("""你好""" , add_special_tokens=A )
__snake_case: List[str] = tokenizer.encode("""你是谁""" , add_special_tokens=A )
__snake_case: Tuple = tokenizer.build_inputs_with_special_tokens(A )
__snake_case: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__snake_case: Optional[int] = """你好,你是谁"""
__snake_case: Tuple = tokenizer.tokenize(A )
__snake_case: Optional[Any] = tokenizer.convert_tokens_to_ids(A )
__snake_case: str = tokenizer.convert_tokens_to_shape_ids(A )
__snake_case: Dict = tokenizer.convert_tokens_to_pronunciation_ids(A )
__snake_case: Union[str, Any] = tokenizer.prepare_for_model(
A , A , A , add_special_tokens=A )
__snake_case: int = tokenizer.encode_plus(A , add_special_tokens=A )
self.assertEqual(A , A )
| 155 | 0 |
"""simple docstring"""
import functools
from typing import Any
def a__(string: str, words: list[str]) -> bool:
    '''simple docstring'''
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key: str = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        # mark the node that terminates a complete word
        trie_node[word_keeper_key] = True
    len_string = len(string)
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
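# illustrative behaviour of the function above (hypothetical inputs, not from the original file):
# a__("applepenapple", ["apple", "pen"]) -> True, via the split "apple" + "pen" + "apple"
# a__("catsandog", ["cats", "dog", "sand", "and", "cat"]) -> False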
if __name__ == "__main__":
import doctest
doctest.testmod()
| 645 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType('''DataClass''', Any)
lowerCAmelCase__ = NewType('''DataClassType''', Any)
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def a__ ( SCREAMING_SNAKE_CASE : list ):
'''simple docstring'''
lowerCAmelCase : Any = {str(SCREAMING_SNAKE_CASE ): choice for choice in choices}
return lambda SCREAMING_SNAKE_CASE : str_to_choice.get(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
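# the factory above (invoked as make_choice_type_function further down) maps the argparse string form
# back to the original choice object, e.g. an Enum member, falling back to the raw string when unmatched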
def a__ ( *,
SCREAMING_SNAKE_CASE : Union[str, List[str]] = None , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : Any = dataclasses.MISSING , SCREAMING_SNAKE_CASE : Callable[[], Any] = dataclasses.MISSING , SCREAMING_SNAKE_CASE : dict = None , **SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
lowerCAmelCase : Tuple = {}
if aliases is not None:
lowerCAmelCase : Union[str, Any] = aliases
if help is not None:
lowerCAmelCase : Optional[Any] = help
return dataclasses.field(metadata=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , default_factory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Iterable[DataClassType]
def __init__( self , snake_case__ , **snake_case__ ):
"""simple docstring"""
if "formatter_class" not in kwargs:
lowerCAmelCase : Optional[int] = ArgumentDefaultsHelpFormatter
super().__init__(**snake_case__ )
if dataclasses.is_dataclass(snake_case__ ):
lowerCAmelCase : List[Any] = [dataclass_types]
lowerCAmelCase : List[str] = list(snake_case__ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(snake_case__ )
@staticmethod
def lowercase__ ( snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = f"""--{field.name}"""
lowerCAmelCase : Optional[Any] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , snake_case__ ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
lowerCAmelCase : List[str] = kwargs.pop("aliases" , [] )
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : int = [aliases]
lowerCAmelCase : int = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(snake_case__ , "UnionType" ) and isinstance(snake_case__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(snake_case__ ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f""" Problem encountered in field '{field.name}'.""" )
if type(snake_case__ ) not in field.type.__args__:
# filter `str` in Union
lowerCAmelCase : str = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowerCAmelCase : Optional[int] = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowerCAmelCase : List[Any] = (
field.type.__args__[0] if isinstance(snake_case__ , field.type.__args__[1] ) else field.type.__args__[1]
)
lowerCAmelCase : List[Any] = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowerCAmelCase : List[Any] = {}
if origin_type is Literal or (isinstance(field.type , snake_case__ ) and issubclass(field.type , snake_case__ )):
if origin_type is Literal:
lowerCAmelCase : str = field.type.__args__
else:
lowerCAmelCase : List[str] = [x.value for x in field.type]
lowerCAmelCase : List[Any] = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
lowerCAmelCase : int = field.default
else:
lowerCAmelCase : List[str] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowerCAmelCase : Dict = copy(snake_case__ )
# Hack because type=bool in argparse does not behave as we want.
lowerCAmelCase : str = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowerCAmelCase : int = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowerCAmelCase : Any = default
# This tells argparse we accept 0 or 1 value after --field_name
lowerCAmelCase : List[str] = "?"
# This is the value that will get picked if we do --field_name (without value)
lowerCAmelCase : Union[str, Any] = True
elif isclass(snake_case__ ) and issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = field.type.__args__[0]
lowerCAmelCase : List[str] = "+"
if field.default_factory is not dataclasses.MISSING:
lowerCAmelCase : Union[str, Any] = field.default_factory()
elif field.default is dataclasses.MISSING:
lowerCAmelCase : int = True
else:
lowerCAmelCase : Optional[Any] = field.type
if field.default is not dataclasses.MISSING:
lowerCAmelCase : Optional[int] = field.default
elif field.default_factory is not dataclasses.MISSING:
lowerCAmelCase : Union[str, Any] = field.default_factory()
else:
lowerCAmelCase : List[str] = True
parser.add_argument(snake_case__ , *snake_case__ , **snake_case__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowerCAmelCase : Any = False
parser.add_argument(f"""--no_{field.name}""" , action="store_false" , dest=field.name , **snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if hasattr(snake_case__ , "_argument_group_name" ):
lowerCAmelCase : Optional[int] = self.add_argument_group(dtype._argument_group_name )
else:
lowerCAmelCase : Any = self
try:
lowerCAmelCase : Dict[str, type] = get_type_hints(snake_case__ )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(snake_case__ ):
lowerCAmelCase : Optional[int] = ".".join(map(snake_case__ , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(snake_case__ ):
if not field.init:
continue
lowerCAmelCase : Any = type_hints[field.name]
self._parse_dataclass_field(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__=None , snake_case__=False , snake_case__=True , snake_case__=None , snake_case__=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowerCAmelCase : Dict = []
if args_filename:
args_files.append(Path(snake_case__ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowerCAmelCase : Optional[Any] = ArgumentParser()
args_file_parser.add_argument(snake_case__ , type=snake_case__ , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
lowerCAmelCase , lowerCAmelCase : List[Any] = args_file_parser.parse_known_args(args=snake_case__ )
lowerCAmelCase : Optional[int] = vars(snake_case__ ).get(args_file_flag.lstrip("-" ) , snake_case__ )
if cmd_args_file_paths:
args_files.extend([Path(snake_case__ ) for p in cmd_args_file_paths] )
lowerCAmelCase : Optional[Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowerCAmelCase : List[str] = file_args + args if args is not None else file_args + sys.argv[1:]
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.parse_known_args(args=snake_case__ )
lowerCAmelCase : List[Any] = []
for dtype in self.dataclass_types:
lowerCAmelCase : Union[str, Any] = {f.name for f in dataclasses.fields(snake_case__ ) if f.init}
lowerCAmelCase : List[str] = {k: v for k, v in vars(snake_case__ ).items() if k in keys}
for k in keys:
delattr(snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = dtype(**snake_case__ )
outputs.append(snake_case__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(snake_case__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def lowercase__ ( self , snake_case__ , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = set(args.keys() )
lowerCAmelCase : Optional[Any] = []
for dtype in self.dataclass_types:
lowerCAmelCase : Optional[Any] = {f.name for f in dataclasses.fields(snake_case__ ) if f.init}
lowerCAmelCase : Union[str, Any] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowerCAmelCase : Tuple = dtype(**snake_case__ )
outputs.append(snake_case__ )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(snake_case__ )}""" )
return tuple(snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = False ):
"""simple docstring"""
with open(Path(snake_case__ ) , encoding="utf-8" ) as open_json_file:
lowerCAmelCase : Dict = json.loads(open_json_file.read() )
lowerCAmelCase : Union[str, Any] = self.parse_dict(snake_case__ , allow_extra_keys=snake_case__ )
return tuple(snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = False ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.parse_dict(yaml.safe_load(Path(snake_case__ ).read_text() ) , allow_extra_keys=snake_case__ )
return tuple(snake_case__ )
| 645 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
lowerCAmelCase : List[Any] = tempfile.mkdtemp()
lowerCAmelCase : Dict = SamImageProcessor()
lowerCAmelCase : Union[str, Any] = SamProcessor(snake_case__ )
processor.save_pretrained(self.tmpdirname )
def lowercase ( self , **snake_case__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def lowercase ( self ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[str] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self ):
lowerCAmelCase : Optional[int] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Any = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowerCAmelCase : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : str = SamProcessor(image_processor=snake_case__ )
lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
lowerCAmelCase : Optional[Any] = image_processor(snake_case__ , return_tensors='np' )
lowerCAmelCase : str = processor(images=snake_case__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def lowercase ( self ):
lowerCAmelCase : Union[str, Any] = self.get_image_processor()
lowerCAmelCase : Tuple = SamProcessor(image_processor=snake_case__ )
lowerCAmelCase : List[Any] = [torch.ones((1, 3, 5, 5) )]
lowerCAmelCase : List[Any] = [[1764, 2646]]
lowerCAmelCase : Union[str, Any] = [[683, 1024]]
lowerCAmelCase : Optional[int] = processor.post_process_masks(snake_case__ , snake_case__ , snake_case__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
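# post_process_masks upsamples the low-resolution mask logits back to each image's original (height, width), here (1764, 2646)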
lowerCAmelCase : int = processor.post_process_masks(
snake_case__ , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowerCAmelCase : int = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase : List[str] = processor.post_process_masks(snake_case__ , np.array(snake_case__ ) , np.array(snake_case__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : List[Any] = [[1, 0], [0, 1]]
with self.assertRaises(snake_case__ ):
lowerCAmelCase : Optional[Any] = processor.post_process_masks(snake_case__ , np.array(snake_case__ ) , np.array(snake_case__ ) )
@require_vision
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
lowerCAmelCase : List[str] = tempfile.mkdtemp()
lowerCAmelCase : Tuple = SamImageProcessor()
lowerCAmelCase : str = SamProcessor(snake_case__ )
processor.save_pretrained(self.tmpdirname )
def lowercase ( self , **snake_case__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def lowercase ( self ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self ):
lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self ):
lowerCAmelCase : Tuple = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
lowerCAmelCase : List[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : str = SamProcessor(image_processor=snake_case__ )
lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
lowerCAmelCase : List[Any] = image_processor(snake_case__ , return_tensors='np' )
lowerCAmelCase : Any = processor(images=snake_case__ , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def lowercase ( self ):
lowerCAmelCase : str = self.get_image_processor()
lowerCAmelCase : Tuple = SamProcessor(image_processor=snake_case__ )
lowerCAmelCase : List[str] = [tf.ones((1, 3, 5, 5) )]
lowerCAmelCase : Dict = [[1764, 2646]]
lowerCAmelCase : List[str] = [[683, 1024]]
lowerCAmelCase : Dict = processor.post_process_masks(snake_case__ , snake_case__ , snake_case__ , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : Tuple = processor.post_process_masks(
snake_case__ , tf.convert_to_tensor(snake_case__ ) , tf.convert_to_tensor(snake_case__ ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
lowerCAmelCase : Any = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase : Any = processor.post_process_masks(
snake_case__ , np.array(snake_case__ ) , np.array(snake_case__ ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
lowerCAmelCase : int = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowerCAmelCase : Dict = processor.post_process_masks(
snake_case__ , np.array(snake_case__ ) , np.array(snake_case__ ) , return_tensors='tf' )
@require_vision
@require_torchvision
class lowerCAmelCase ( unittest.TestCase ):
def lowercase ( self ):
lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase : List[str] = SamImageProcessor()
lowerCAmelCase : Optional[Any] = SamProcessor(snake_case__ )
processor.save_pretrained(self.tmpdirname )
def lowercase ( self , **snake_case__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def lowercase ( self ):
shutil.rmtree(self.tmpdirname )
def lowercase ( self ):
lowerCAmelCase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase : List[str] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowercase ( self ):
lowerCAmelCase : str = self.get_image_processor()
lowerCAmelCase : Dict = SamProcessor(image_processor=snake_case__ )
lowerCAmelCase : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowerCAmelCase : Union[str, Any] = [tf.convert_to_tensor(snake_case__ )]
lowerCAmelCase : str = [torch.tensor(snake_case__ )]
lowerCAmelCase : Tuple = [[1764, 2646]]
lowerCAmelCase : int = [[683, 1024]]
lowerCAmelCase : str = processor.post_process_masks(
snake_case__ , snake_case__ , snake_case__ , return_tensors='tf' )
lowerCAmelCase : int = processor.post_process_masks(
snake_case__ , snake_case__ , snake_case__ , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Tuple = SamProcessor(image_processor=snake_case__ )
lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase : Any = image_processor(snake_case__ , return_tensors='pt' )['pixel_values'].numpy()
lowerCAmelCase : Tuple = processor(images=snake_case__ , return_tensors='pt' )['pixel_values'].numpy()
lowerCAmelCase : Optional[int] = image_processor(snake_case__ , return_tensors='tf' )['pixel_values'].numpy()
lowerCAmelCase : Tuple = processor(images=snake_case__ , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
| 646 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( a , unittest.TestCase ):
_lowerCamelCase : Tuple = GPTSwaTokenizer
_lowerCamelCase : str = False
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[Any] = False
def lowercase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : Tuple = GPTSwaTokenizer(snake_case__ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase ( self , snake_case__ ):
lowerCAmelCase : List[Any] = 'This is a test'
lowerCAmelCase : List[Any] = 'This is a test'
return input_text, output_text
def lowercase ( self ):
lowerCAmelCase : Tuple = '<s>'
lowerCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase ( self ):
lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(snake_case__ ) , 2000 )
def lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def lowercase ( self ):
lowerCAmelCase : List[Any] = GPTSwaTokenizer(snake_case__ )
lowerCAmelCase : Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [465, 287, 265, 631, 842] )
lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
snake_case__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
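# "é" is not in the vocab, so byte fallback splits it into its UTF-8 bytes <0xC3> <0xA9>;
# the digit "9" likewise falls back to <0x39>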
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(snake_case__ )
# fmt: off
self.assertListEqual(
snake_case__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def lowercase ( self ):
lowerCAmelCase : str = GPTSwaTokenizer(snake_case__ )
lowerCAmelCase : Optional[int] = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCAmelCase : Tuple = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(snake_case__ , snake_case__ ):
self.assertListEqual(tokenizer.encode_fast(snake_case__ ) , snake_case__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.decode_fast(snake_case__ ) , snake_case__ )
@slow
def lowercase ( self ):
lowerCAmelCase : str = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase : Tuple = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=snake_case__ , )
| 646 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_A = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class A ( __UpperCAmelCase ):
def __init__( self, **UpperCamelCase__ ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self, '''vision''' )
self.check_model_type(UpperCamelCase__ )
def __call__( self, UpperCamelCase__, UpperCamelCase__ = None, **UpperCamelCase__, ):
"""simple docstring"""
if "text_queries" in kwargs:
lowerCAmelCase_ = kwargs.pop('''text_queries''' )
if isinstance(UpperCamelCase__, (str, Image.Image) ):
lowerCAmelCase_ = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
lowerCAmelCase_ = image
lowerCAmelCase_ = super().__call__(UpperCamelCase__, **UpperCamelCase__ )
return results
def SCREAMING_SNAKE_CASE__ ( self, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = {}
if "threshold" in kwargs:
lowerCAmelCase_ = kwargs['''threshold''']
if "top_k" in kwargs:
lowerCAmelCase_ = kwargs['''top_k''']
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = load_image(inputs['''image'''] )
lowerCAmelCase_ = inputs['''candidate_labels''']
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
lowerCAmelCase_ = candidate_labels.split(''',''' )
lowerCAmelCase_ = torch.tensor([[image.height, image.width]], dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCamelCase__ ):
lowerCAmelCase_ = self.tokenizer(UpperCamelCase__, return_tensors=self.framework )
lowerCAmelCase_ = self.image_processor(UpperCamelCase__, return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
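# one forward pass is yielded per candidate label; ChunkPipeline collects them and postprocess merges the per-label detections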
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = model_inputs.pop('''target_size''' )
lowerCAmelCase_ = model_inputs.pop('''candidate_label''' )
lowerCAmelCase_ = model_inputs.pop('''is_last''' )
lowerCAmelCase_ = self.model(**UpperCamelCase__ )
lowerCAmelCase_ = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=0.1, UpperCamelCase__=None ):
"""simple docstring"""
lowerCAmelCase_ = []
for model_output in model_outputs:
lowerCAmelCase_ = model_output['''candidate_label''']
lowerCAmelCase_ = BaseModelOutput(UpperCamelCase__ )
lowerCAmelCase_ = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase__, threshold=UpperCamelCase__, target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
lowerCAmelCase_ = outputs['''scores'''][index].item()
lowerCAmelCase_ = self._get_bounding_box(outputs['''boxes'''][index][0] )
lowerCAmelCase_ = {'''score''': score, '''label''': label, '''box''': box}
results.append(UpperCamelCase__ )
lowerCAmelCase_ = sorted(UpperCamelCase__, key=lambda UpperCamelCase__ : x["score"], reverse=UpperCamelCase__ )
if top_k:
lowerCAmelCase_ = results[:top_k]
return results
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = box.int().tolist()
lowerCAmelCase_ = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 431 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
@dataclass
class A :
__snake_case = 42
__snake_case = field(default_factory=__UpperCAmelCase )
__snake_case = field(default_factory=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase__, nn.Convad ) or isinstance(UpperCamelCase__, nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCamelCase__ )
def __call__( self, UpperCamelCase__ ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return list(filter(lambda UpperCamelCase__ : len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
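# the parametrized property above keeps only the traced leaf modules that actually carry weights or buffers (non-empty state_dict)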
@dataclass
class A :
__snake_case = 42
__snake_case = 42
__snake_case = 1
__snake_case = field(default_factory=__UpperCAmelCase )
__snake_case = field(default_factory=__UpperCAmelCase )
__snake_case = True
def __call__( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = Tracker(self.dest )(UpperCamelCase__ ).parametrized
lowerCAmelCase_ = Tracker(self.src )(UpperCamelCase__ ).parametrized
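# both networks are traced on the same input so their parametrized operations line up positionally for the pairwise copy below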
lowerCAmelCase_ = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.src_skip, UpperCamelCase__ ) )
lowerCAmelCase_ = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.dest_skip, UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ) and self.raise_if_mismatch:
raise Exception(
f"Numbers of operations are different. Source module has {len(UpperCamelCase__ )} operations while"
f" destination module has {len(UpperCamelCase__ )}." )
for dest_m, src_m in zip(UpperCamelCase__, UpperCamelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"Transfered from={src_m} to={dest_m}" )
class FakeRegNetVisslWrapper( nn.Module ):
    def __init__( self, model ):
        """simple docstring"""
        super().__init__()
        feature_blocks = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block''' ), f"Unexpected layer name {k}"
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((f"res{block_index}", v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )
    def forward( self, x ):
        """simple docstring"""
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap( dict ):
    def convert_name_to_timm( self, x ):
        """simple docstring"""
        x_split = x.split('''-''' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
    def __getitem__( self, x ):
        """simple docstring"""
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x, pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap( dict ):
    def __getitem__( self, x ):
        """simple docstring"""
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys ):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}" )
    return to_state_dict
def convert_weight_and_push( name , from_model_func , our_model_func , config , save_directory , push_to_hub = True , ):
print(f"Converting {name}..." )
    with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=_A )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
        to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
        our_model.load_state_dict(to_state_dict )
    our_outputs = our_model(x , output_hidden_states=True )
    our_output = (
        our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
    )
    from_output = from_model(x )
    from_output = from_output[-1] if type(from_output ) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=True , )
        size = 224 if '''seer''' not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=size )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=True , )
        print(f"Pushed {name}" )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url , model_func ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location='''cpu''' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['''classy_state_dict''']['''base_model''']['''model''']
        state_dict = model_state_dict['''trunk''']
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map['''regnet-y-320-seer'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['''regnet-y-640-seer'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['''regnet-y-1280-seer'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['''regnet-y-10b-seer'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
    # IN1K finetuned
    names_to_from_model_map['''regnet-y-320-seer-in1k'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY32gf() ) , )
    names_to_from_model_map['''regnet-y-640-seer-in1k'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY64gf() ) , )
    names_to_from_model_map['''regnet-y-1280-seer-in1k'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetY128gf() ) , )
    names_to_from_model_map['''regnet-y-10b-seer-in1k'''] = partial(
        load_using_classy_vision , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
        convert_weight_and_push(
            model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , save_directory , push_to_hub , )
else:
for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , config , save_directory , push_to_hub , )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
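# Example invocation (a sketch; the script filename is illustrative, the model name is one
# of the keys in names_to_config above):
#
#     python convert_regnet_to_pytorch.py --model_name regnet-y-040 --pytorch_dump_folder_path ./regnet-y-040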
| 431 | 1 |
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Square-and-multiply modular exponentiation; assumes exponent >= 1."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Iterated modular exponentiation: result = base ** result (mod 10**digits), height - 1 times."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
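# Sanity-check sketch: _modexpt is plain square-and-multiply, so it should agree with
# Python's built-in three-argument pow:
#
#     assert _modexpt(2, 10, 1000) == pow(2, 10, 1000) == 24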
if __name__ == "__main__":
print(f'''{solution() = }''')
| 719 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Parse raw DPR training data into an evaluation-set file and a gold-data file."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
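# Input shape sketch (inferred from the keys read above): each record in the source JSON
# looks like {"question": "...", "positive_ctxs": [{"title": "..."}, ...], ...}.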
if __name__ == "__main__":
main()
| 318 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_A: Dict = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_A: Any = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_A: int = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        results = spearmanr(predictions , references )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 126 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A: List[str] = logging.get_logger(__name__)
_A: Optional[Any] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig( PretrainedConfig ):
    model_type = """instructblip_vision_model"""
    def __init__( self , hidden_size=1_408 , intermediate_size=6_144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig( PretrainedConfig ):
    model_type = """instructblip_qformer"""
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_408 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            config_dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig( PretrainedConfig ):
    model_type = """instructblip"""
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.0_2
@classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''qformer_config'''] = self.qformer_config.to_dict()
        output['''text_config'''] = self.text_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
return output
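# Construction sketch (sub-config classes as referenced in __init__ above; the values shown
# are illustrative defaults):
#
#     config = InstructBlipConfig(
#         vision_config=InstructBlipVisionConfig().to_dict(),
#         qformer_config=InstructBlipQFormerConfig().to_dict(),
#         text_config={"model_type": "opt"},
#         num_query_tokens=32,
#     )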
| 126 | 1 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''Return every way target can be constructed by concatenating words from word_bank.'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
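# Complexity note: with target length n and m bank words, the table pass does O(n * m)
# slice checks, but the number of stored combinations (and hence total work) can grow
# exponentially for highly decomposable targets.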
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 626 |
from math import factorial
def solution(num: int = 100) -> int:
    '''Return the sum of the digits of num!.'''
    return sum(map(int, str(factorial(num))))
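# Quick check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27.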
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 626 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={'''help''': '''The output directory where the model will be written.'''} , )
    encoder_model_name_or_path: str = field(
        metadata={
            '''help''': (
                '''The encoder model checkpoint for weights initialization.'''
                '''Don\'t set if you want to train an encoder model from scratch.'''
            )
        } , )
    decoder_model_name_or_path: str = field(
        metadata={
            '''help''': (
                '''The decoder model checkpoint for weights initialization.'''
                '''Don\'t set if you want to train a decoder model from scratch.'''
            )
        } , )
    encoder_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
    decoder_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def main():
    parser = HfArgumentParser((ModelArguments,) )
    ((model_args) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 588 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid: np.ndarray , source: tuple[int, int] , destination: tuple[int, int] , allow_diagonal: bool , ):
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
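# Usage sketch (cells equal to 1 are passable, per the relaxation test above):
#
#     grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
#     dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#     # should return (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])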
if __name__ == "__main__":
import doctest
doctest.testmod()
| 588 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def test_call_pil( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def test_call_numpy( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def test_call_pytorch( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 705 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 488 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : List[Any] = logging.get_logger(__name__)
def get_detr_config(model_name ):
    '''simple docstring'''
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
    else:
        raise ValueError('''Model name should include either resnet50 or resnet101''' )
    config = DetrConfig(use_timm_backbone=False , backbone_config=backbone_config )
# set label attributes
_a = '''panoptic''' in model_name
if is_panoptic:
_a = 250
else:
_a = 91
_a = '''huggingface/label-files'''
_a = '''coco-detection-id2label.json'''
_a = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
    _a = {int(k ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def create_rename_keys(config ):
'''simple docstring'''
_a = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
f'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
f'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
f'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
f'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict , old , new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def read_in_q_k_v(state_dict , is_panoptic=False ):
    '''simple docstring'''
    prefix = ''''''
    if is_panoptic:
        prefix = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_a = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
_a = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[:256, :]
_a = in_proj_bias[:256]
_a = in_proj_weight[256:512, :]
_a = in_proj_bias[256:512]
_a = in_proj_weight[-256:, :]
_a = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_a = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
_a = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[:256, :]
_a = in_proj_bias[:256]
_a = in_proj_weight[256:512, :]
_a = in_proj_bias[256:512]
_a = in_proj_weight[-256:, :]
_a = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_a = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
_a = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_a = in_proj_weight_cross_attn[:256, :]
_a = in_proj_bias_cross_attn[:256]
_a = in_proj_weight_cross_attn[256:512, :]
_a = in_proj_bias_cross_attn[256:512]
_a = in_proj_weight_cross_attn[-256:, :]
_a = in_proj_bias_cross_attn[-256:]
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    config , is_panoptic = get_detr_config(model_name )
# load original model from torch hub
    model_name_to_original_name = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(f'Converting model {model_name}...' )
    detr = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = '''detr.''' + src
        rename_key(state_dict , src , dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_a = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
_a = state_dict.pop(UpperCamelCase )
_a = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_a = state_dict.pop(UpperCamelCase )
_a = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
_a = state_dict.pop(UpperCamelCase )
_a = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
_a = state_dict.pop(UpperCamelCase )
_a = val
# finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
model.eval()
# verify our conversion on an image
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    processor = DetrImageProcessor(format=format )
    encoding = processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(f'nielsr/{model_name}' )
processor.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
_snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
_snake_case : Optional[int] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
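# Example invocation (a sketch; the script filename is illustrative, the flags match the
# argparse setup above):
#
#     python convert_detr_checkpoint.py --model_name detr-resnet-50 --pytorch_dump_folder_path ./detr-resnet-50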
| 22 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_snake_case : Tuple = logging.get_logger(__name__)
class A ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = size if size is not None else {'''shortest_edge''': 2_56}
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
_a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
if do_rescale:
_a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_normalize:
_a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
_a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any:
"""simple docstring"""
_a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCAmelCase_ ):
_a = target_sizes.numpy()
_a = []
for idx in range(len(lowerCAmelCase_ ) ):
_a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
_a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase_ )
else:
_a = logits.argmax(dim=1 )
_a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
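# Hedged usage sketch (not part of the snippet above): the post-processing
# method resizes logits to each target size and takes a per-pixel argmax.
# Checkpoint and class names below are assumptions based on a SegFormer-style
# image processor; adapt them to the actual model in use.
import requests
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor
processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# target_sizes are (height, width) tuples; PIL's .size is (width, height).
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]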
| 22 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCamelCase__ :
__lowerCamelCase = None
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowerCamelCase__: int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase__: int = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __a )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowerCamelCase__: List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__: Dict = os.path.join(__a , """feat_extract.json""" )
feat_extract_first.to_json_file(__a )
lowerCamelCase__: int = self.feature_extraction_class.from_json_file(__a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase__: List[str] = feat_extract_first.save_pretrained(__a )[0]
check_json_file_has_correct_format(__a )
lowerCamelCase__: Union[str, Any] = self.feature_extraction_class.from_pretrained(__a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__: str = self.feature_extraction_class()
self.assertIsNotNone(__a )
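# Hedged sketch (not in the source): a concrete test case would plug a real
# feature extractor into the mixin above, assuming the mixin's class
# attributes are `feature_extraction_class` and `feat_extract_dict`.
import unittest
from transformers import Wav2Vec2FeatureExtractor
class WavaVecaFeatureExtractionSaveLoadTest(lowerCamelCase__, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}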
| 704 |
from __future__ import annotations
from random import choice
def __lowerCAmelCase ( _UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return choice(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> int:
'''simple docstring'''
lowerCamelCase__: Optional[Any] = random_pivot(_UpperCamelCase )
# partition based on pivot
# linear time
lowerCamelCase__: Union[str, Any] = [e for e in lst if e < pivot]
lowerCamelCase__: Dict = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_UpperCamelCase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_UpperCamelCase ) < k - 1:
return kth_number(_UpperCamelCase , k - len(_UpperCamelCase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
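# Hedged example (not in the source): assuming the helpers above keep their
# upstream names `random_pivot` and `kth_number`, the function returns the
# k-th smallest element in expected O(n) time. Note the partition keeps only
# elements strictly smaller/larger than the pivot, so inputs should contain
# distinct values.
assert kth_number([2, 1, 3, 4, 5], 3) == 3
assert kth_number([7], 1) == 7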
| 242 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
def wrapper(*_UpperCAmelCase, **_UpperCAmelCase ):
lowerCAmelCase : str = timeit.default_timer()
lowerCAmelCase : str = func(*_UpperCAmelCase, **_UpperCAmelCase )
lowerCAmelCase : Optional[int] = timeit.default_timer() - starttime
return delta
lowerCAmelCase : Union[str, Any] = func.__name__
return wrapper
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase=100, _UpperCAmelCase=None ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = []
lowerCAmelCase : Optional[int] = seq_shapes or {}
for i in range(_UpperCAmelCase ):
lowerCAmelCase : Any = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_UpperCAmelCase, _ArrayXD ):
lowerCAmelCase : Dict = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_UpperCAmelCase, datasets.Value ):
if v.dtype == "string":
lowerCAmelCase : Any = 'The small grey turtle was surprisingly fast when challenged.'
else:
lowerCAmelCase : Optional[Any] = np.random.randint(10, size=1 ).astype(v.dtype ).item()
elif isinstance(_UpperCAmelCase, datasets.Sequence ):
while isinstance(_UpperCAmelCase, datasets.Sequence ):
lowerCAmelCase : int = v.feature
lowerCAmelCase : Optional[int] = seq_shapes[k]
lowerCAmelCase : str = np.random.rand(*_UpperCAmelCase ).astype(v.dtype )
lowerCAmelCase : Any = data
dummy_data.append((i, example) )
return dummy_data
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=100, _UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Any = generate_examples(_UpperCAmelCase, num_examples=_UpperCAmelCase, seq_shapes=_UpperCAmelCase )
with ArrowWriter(features=_UpperCAmelCase, path=_UpperCAmelCase ) as writer:
for key, record in dummy_data:
lowerCAmelCase : Any = features.encode_example(_UpperCAmelCase )
writer.write(_UpperCAmelCase )
lowerCAmelCase , lowerCAmelCase : Optional[int] = writer.finalize()
    if num_final_examples != num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCAmelCase : int = datasets.Dataset.from_file(filename=_UpperCAmelCase, info=datasets.DatasetInfo(features=_UpperCAmelCase ) )
return dataset
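# Hedged usage sketch: the helper name, argument order, and parameter names
# below are assumptions (mirroring datasets' benchmark utilities); the body
# above writes the generated examples with ArrowWriter and reloads them.
features = datasets.Features(
    {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
)
dataset = generate_example_dataset(
    "/tmp/bench.arrow", features, num_examples=10, seq_shapes={"vec": (32,)}
)
print(dataset)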
| 343 |
from ...configuration_utils import PretrainedConfig
__A : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowerCAmelCase_ : str = "nezha"
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[int]=21128 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : int=3072 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : Tuple=64 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=1E-12 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Optional[Any]=True , **UpperCAmelCase_ : Any , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : Any = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : Tuple = max_relative_position
lowerCAmelCase : Tuple = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : int = layer_norm_eps
lowerCAmelCase : List[str] = classifier_dropout
lowerCAmelCase : Optional[Any] = use_cache
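# Hedged sketch: assuming the class above corresponds to transformers'
# NezhaConfig, instantiation with the signature defaults looks like this.
config = NezhaConfig()
config.num_hidden_layers = 6  # override a field before building a model
print(config.vocab_size)      # 21128 with the defaults above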
| 343 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = DanceDiffusionPipeline
__a = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__a = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
__a = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__a = False
__a = False
def UpperCamelCase_ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Dict= UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCAmelCase , use_timestep_embedding=lowerCAmelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
SCREAMING_SNAKE_CASE__: str= IPNDMScheduler()
SCREAMING_SNAKE_CASE__: str= {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Any:
if str(lowerCAmelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: List[Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: str= '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_components()
SCREAMING_SNAKE_CASE__: Optional[Any]= DanceDiffusionPipeline(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= pipe(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= output.audios
SCREAMING_SNAKE_CASE__: Union[str, Any]= audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
SCREAMING_SNAKE_CASE__: Union[str, Any]= np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase_ ( self ) -> List[Any]:
return super().test_save_load_local()
@skip_mps
def UpperCamelCase_ ( self ) -> Tuple:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase_ ( self ) -> Any:
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase_ ( self ) -> Any:
return super().test_attention_slicing_forward_pass()
def UpperCamelCase_ ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: int= torch_device
SCREAMING_SNAKE_CASE__: Union[str, Any]= DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
SCREAMING_SNAKE_CASE__: Dict= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[str]= pipe(generator=lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
SCREAMING_SNAKE_CASE__: Union[str, Any]= output.audios
SCREAMING_SNAKE_CASE__: Tuple= audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
SCREAMING_SNAKE_CASE__: Optional[Any]= np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self ) -> Any:
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch_device
SCREAMING_SNAKE_CASE__: List[Any]= DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__: Any= pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Any= pipe(generator=lowerCAmelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
SCREAMING_SNAKE_CASE__: Dict= output.audios
SCREAMING_SNAKE_CASE__: List[str]= audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
SCREAMING_SNAKE_CASE__: int= np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
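# Hedged usage sketch: generating a short clip with the pipeline exercised by
# the slow tests above (checkpoint name and call signature are taken from
# those tests).
import torch
from diffusers import DanceDiffusionPipeline
pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
print(output.audios[0].shape)  # (channels, samples)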
| 107 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=18 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , ) -> Any:
SCREAMING_SNAKE_CASE__: Tuple= size if size is not None else {'''height''': 18, '''width''': 18}
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: Tuple= batch_size
SCREAMING_SNAKE_CASE__: int= num_channels
SCREAMING_SNAKE_CASE__: List[Any]= image_size
SCREAMING_SNAKE_CASE__: Dict= min_resolution
SCREAMING_SNAKE_CASE__: Union[str, Any]= max_resolution
SCREAMING_SNAKE_CASE__: Optional[Any]= do_resize
SCREAMING_SNAKE_CASE__: List[Any]= size
SCREAMING_SNAKE_CASE__: Optional[Any]= apply_ocr
def UpperCamelCase_ ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: List[Any]= LayoutLMvaImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Tuple= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''apply_ocr''' ) )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Tuple= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def UpperCamelCase_ ( self ) -> Any:
pass
def UpperCamelCase_ ( self ) -> List[str]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: int= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__: Optional[int]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__: str= image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase )
self.assertIsInstance(encoding.boxes , lowerCAmelCase )
# Test batched
SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Tuple= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__: Dict= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__: Dict= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Tuple= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__: int= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Optional[Any]:
# with apply_OCR = True
SCREAMING_SNAKE_CASE__: int= LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE__: int= load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
SCREAMING_SNAKE_CASE__: str= Image.open(ds[0]['''file'''] ).convert('''RGB''' )
SCREAMING_SNAKE_CASE__: str= image_processing(lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE__: Dict= [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
SCREAMING_SNAKE_CASE__: List[Any]= [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 
788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase )
self.assertListEqual(encoding.boxes , lowerCAmelCase )
# with apply_OCR = False
SCREAMING_SNAKE_CASE__: int= LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
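# Hedged sketch: end-to-end OCR preprocessing mirroring the integration test
# above (dataset name and image key come from that test; pytesseract must be
# installed for apply_ocr=True).
from datasets import load_dataset
from PIL import Image
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
image = Image.open(ds[0]["file"]).convert("RGB")
processor = LayoutLMvaImageProcessor(apply_ocr=True)
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)                     # (1, 3, 224, 224)
print(len(encoding.words[0]), len(encoding.boxes[0]))  # OCR words and boxes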
| 107 | 1 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
UpperCAmelCase_ : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
UpperCAmelCase_ : int = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
UpperCAmelCase_ : Optional[Any] = BeautifulSoup(res.text, "html.parser")
UpperCAmelCase_ : Optional[Any] = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F"""https://google.com{link.get("href")}""")
| 21 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
A_ : List[str] ="""https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
A_ : int =requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
A_ : Tuple =BeautifulSoup(res.text, """html.parser""")
A_ : Optional[Any] =list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 483 | 0 |
'''simple docstring'''
import re
import subprocess
import sys
UpperCamelCase__ : Optional[Any] = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
UpperCamelCase__ : int = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()
UpperCamelCase__ : List[Any] = '|'.join(sys.argv[1:])
UpperCamelCase__ : Optional[int] = re.compile(rf'^({joined_dirs}).*?\.py$')
UpperCamelCase__ : List[str] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
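# Hedged usage note: this filter is meant to run from the repository root with
# directory prefixes as arguments (the script path below is an assumption),
# printing the modified .py files for CI to act on:
#   python utils/get_modified_files.py utils src tests examples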
| 714 |
'''simple docstring'''
from manim import *
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> List[str]:
A_ : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
A_ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A_ : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
A_ : Any = [mem.copy() for i in range(6 )]
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Optional[Any] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = Text("""CPU""" , font_size=24 )
A_ : Any = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
A_ : Tuple = [mem.copy() for i in range(4 )]
A_ : Optional[int] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Union[str, Any] = Text("""GPU""" , font_size=24 )
A_ : List[str] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCamelCase )
A_ : Optional[int] = [mem.copy() for i in range(6 )]
A_ : List[Any] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : str = Text("""Model""" , font_size=24 )
A_ : Any = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCamelCase )
A_ : List[Any] = []
A_ : str = []
for i, rect in enumerate(_lowerCamelCase ):
A_ : Dict = fill.copy().set_fill(_lowerCamelCase , opacity=0.8 )
target.move_to(_lowerCamelCase )
model_arr.append(_lowerCamelCase )
A_ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowerCamelCase )
self.add(*_lowerCamelCase , *_lowerCamelCase )
A_ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
A_ : Tuple = [meta_mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Any = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Dict = VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
A_ : Union[str, Any] = Text("""Disk""" , font_size=24 )
A_ : Union[str, Any] = Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowerCamelCase , _lowerCamelCase )
A_ : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Union[str, Any] = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowerCamelCase )
A_ : List[str] = MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase ) )
A_ : Optional[int] = Square(0.3 )
input.set_fill(_lowerCamelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowerCamelCase , buff=0.5 )
self.play(Write(_lowerCamelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowerCamelCase , buff=0.02 )
self.play(MoveToTarget(_lowerCamelCase ) )
self.play(FadeOut(_lowerCamelCase ) )
A_ : Optional[int] = Arrow(start=_lowerCamelCase , end=_lowerCamelCase , color=_lowerCamelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowerCamelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
A_ : Union[str, Any] = MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=3 ) )
A_ : Any = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(_lowerCamelCase ) , Circumscribe(model_arr[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(model_cpu_arr[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
A_ : Tuple = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _lowerCamelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
A_ : List[str] = AnimationGroup(
FadeOut(_lowerCamelCase , run_time=0.5 ) , MoveToTarget(_lowerCamelCase , run_time=0.5 ) , FadeIn(_lowerCamelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowerCamelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
A_ : Any = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[i] , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(model_arr[i + 1] , color=_lowerCamelCase , **_lowerCamelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowerCamelCase , **_lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCamelCase , **_lowerCamelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
A_ : Any = a_c
A_ : Dict = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_lowerCamelCase ) , FadeOut(_lowerCamelCase , run_time=0.5 ) , )
A_ : Tuple = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase , run_time=3 ) , MoveToTarget(_lowerCamelCase ) )
self.wait()
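# Hedged usage note: Manim scenes are rendered from the CLI; the file name
# below is an assumption about where this snippet would be saved:
#   manim -pql big_model_inference.py _lowerCAmelCase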
| 385 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Dict , a__ : WhisperForConditionalGeneration , a__ : WhisperProcessor , a__ : AutoencoderKL , a__ : CLIPTextModel , a__ : CLIPTokenizer , a__ : UNetaDConditionModel , a__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a__ : StableDiffusionSafetyChecker , a__ : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=__SCREAMING_SNAKE_CASE , speech_processor=__SCREAMING_SNAKE_CASE , vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , )
def a (self : int , a__ : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE )
def a (self : Union[str, Any] ):
"""simple docstring"""
self.enable_attention_slicing(__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__(self : str , a__ : int , a__ : Dict=1_6000 , a__ : int = 512 , a__ : int = 512 , a__ : int = 50 , a__ : float = 7.5 , a__ : Optional[Union[str, List[str]]] = None , a__ : Optional[int] = 1 , a__ : float = 0.0 , a__ : Optional[torch.Generator] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[str] = "pil" , a__ : bool = True , a__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a__ : int = 1 , **a__ : Optional[int] , ):
"""simple docstring"""
__snake_case = self.speech_processor.feature_extractor(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , sampling_rate=__SCREAMING_SNAKE_CASE ).input_features.to(self.device )
__snake_case = self.speech_model.generate(__SCREAMING_SNAKE_CASE , max_length=48_0000 )
__snake_case = self.speech_processor.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , normalize=__SCREAMING_SNAKE_CASE )[
0
]
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = 1
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = len(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__SCREAMING_SNAKE_CASE )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__SCREAMING_SNAKE_CASE )}.""" )
# get prompt text embeddings
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , __SCREAMING_SNAKE_CASE , 1 )
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , __SCREAMING_SNAKE_CASE , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''''''] * batch_size
elif type(__SCREAMING_SNAKE_CASE ) is not type(__SCREAMING_SNAKE_CASE ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(__SCREAMING_SNAKE_CASE )} !="""
f""" {type(__SCREAMING_SNAKE_CASE )}.""" )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__snake_case = [negative_prompt]
elif batch_size != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(__SCREAMING_SNAKE_CASE )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , __SCREAMING_SNAKE_CASE , 1 )
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , __SCREAMING_SNAKE_CASE , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device='''cpu''' , dtype=__SCREAMING_SNAKE_CASE ).to(
self.device )
else:
__snake_case = torch.randn(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__snake_case = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(__SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# predict the noise residual
__snake_case = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2 )
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = 1 / 0.1_8_2_1_5 * latents
__snake_case = self.vae.decode(__SCREAMING_SNAKE_CASE ).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(__SCREAMING_SNAKE_CASE )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__SCREAMING_SNAKE_CASE , nsfw_content_detected=__SCREAMING_SNAKE_CASE )
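# Hedged usage sketch (checkpoint names and the community-pipeline id are
# assumptions): wiring Whisper and Stable Diffusion components into the
# speech-to-image pipeline defined above.
import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor
device = "cuda" if torch.cuda.is_available() else "cpu"
speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=speech_model,
    speech_processor=speech_processor,
).to(device)
# output = pipe(audio_array, sampling_rate=16_000)  # audio_array: 1-D numpy waveform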
| 592 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowerCAmelCase__ ="examples/"
lowerCAmelCase__ ={
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
lowerCAmelCase__ ={
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowerCAmelCase__ ="README.md"
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]:
with open(UpperCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern]
__SCREAMING_SNAKE_CASE = replace.replace('''VERSION''' , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = re_pattern.sub(UpperCAmelCase__ , UpperCAmelCase__ )
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCAmelCase__ )
def _a ( UpperCAmelCase__ ) -> Any:
for folder, directories, fnames in os.walk(UpperCAmelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , pattern='''examples''' )
def _a ( UpperCAmelCase__ , UpperCAmelCase__=False ) -> List[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if not patch:
update_version_in_examples(UpperCAmelCase__ )
def _a ( ) -> Dict:
__SCREAMING_SNAKE_CASE = '''🤗 Transformers currently provides the following architectures'''
__SCREAMING_SNAKE_CASE = '''1. Want to contribute a new model?'''
with open(UpperCAmelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start of the list.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__SCREAMING_SNAKE_CASE = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCAmelCase__ )
def _a ( ) -> Tuple:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__SCREAMING_SNAKE_CASE = f.read()
__SCREAMING_SNAKE_CASE = REPLACE_PATTERNS['''init'''][0].search(UpperCAmelCase__ ).groups()[0]
return packaging.version.parse(UpperCAmelCase__ )
def _a ( UpperCAmelCase__=False ) -> Dict:
__SCREAMING_SNAKE_CASE = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__SCREAMING_SNAKE_CASE = default_version.base_version
elif patch:
__SCREAMING_SNAKE_CASE = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__SCREAMING_SNAKE_CASE = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__SCREAMING_SNAKE_CASE = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCAmelCase__ ) == 0:
__SCREAMING_SNAKE_CASE = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCAmelCase__ , patch=UpperCAmelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def _a ( ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = get_version()
__SCREAMING_SNAKE_CASE = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__SCREAMING_SNAKE_CASE = current_version.base_version
# Check with the user we got that right.
__SCREAMING_SNAKE_CASE = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCAmelCase__ ) == 0:
__SCREAMING_SNAKE_CASE = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCAmelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase__ =argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
lowerCAmelCase__ =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
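# Hedged usage note: typical invocations of this release helper (the path is
# an assumption about where the script lives in the repo):
#   python utils/release.py                  # bump to the next minor version
#   python utils/release.py --patch          # bump the patch version
#   python utils/release.py --post_release   # move back to a .dev0 version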
| 482 | 0 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
UpperCAmelCase__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> int:
super().__init__()
if hasattr(scheduler.config , 'steps_offset') and scheduler.config.steps_offset != 1:
__snake_case = (
F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , lowercase_ , standard_warn=lowercase_)
__snake_case = dict(scheduler.config)
__snake_case = 1
__snake_case = FrozenDict(lowercase_)
if hasattr(scheduler.config , 'skip_prk_steps') and scheduler.config.skip_prk_steps is False:
__snake_case = (
F"The configuration file of this scheduler: {scheduler} has not set the configuration"
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , lowercase_ , standard_warn=lowercase_)
__snake_case = dict(scheduler.config)
__snake_case = True
__snake_case = FrozenDict(lowercase_)
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
segmentation_model=lowercase_ , segmentation_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Dict:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Union[str, Any]:
self.enable_attention_slicing(lowercase_)
def _a ( self) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`')
__snake_case = torch.device('cuda')
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _a ( self) -> Union[str, Any]:
if self.device != torch.device('meta') or not hasattr(self.unet , '_hf_hook'):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook')
and hasattr(module._hf_hook , 'execution_device')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt').to(self.device)
__snake_case = self.segmentation_model(**lowercase_)
__snake_case = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
__snake_case = self.numpy_to_pil(lowercase_)[0].resize(image.size)
# Run inpainting pipeline with the generated mask
__snake_case = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , )
| 702 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid : list[list[int]] , init : list[int] , goal : list[int] , cost : int , heuristic : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated total cost from the starting cell to the goal cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when the search is complete
    resign = False  # flag set if we can't expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # try out the different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
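# Illustrative check, assuming the implementation above: on a 1x3 corridor with a zero
# heuristic the search degenerates to uniform-cost expansion and walks straight to the goal.
_tiny_grid = [[0, 0, 0]]
_tiny_path, _ = search(_tiny_grid, [0, 0], [0, 2], 1, [[0, 0, 0]])
assert _tiny_path == [[0, 0], [0, 1], [0, 2]]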
if __name__ == "__main__":
grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
init = [0, 0]
# all coordinates are given in format [y,x]
goal = [len(grid) - 1, len(grid[0]) - 1]
cost = 1
# the heuristic map, which pushes the path toward the goal
heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
heuristic[i][j] = 99
path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/pegasus-xsum': 512,
}
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__( self , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=1_0_3 , sp_model_kwargs = None , **kwargs , ) -> None:
    self.offset = offset
    if additional_special_tokens is not None:
        if not isinstance(additional_special_tokens , list ):
            raise TypeError(
                f"additional_special_tokens should be of type list, but is"
                f" {type(additional_special_tokens )}" )
        additional_special_tokens_extended = (
            ([mask_token_sent] + additional_special_tokens)
            if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
            else additional_special_tokens
        )
        # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
        additional_special_tokens_extended += [
            f"<unk_{i}>" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
        ]
        if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
            raise ValueError(
                'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." )
        additional_special_tokens = additional_special_tokens_extended
    else:
        additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
        additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )]
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    super().__init__(
        eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , pad_token=pad_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
    self.mask_token_sent = mask_token_sent
    self.vocab_file = vocab_file
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(vocab_file )
# add special tokens to encoder dict
self.encoder = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
self.decoder = {v: k for k, v in self.encoder.items()}
@property
def vocab_size( self ) -> int:
return len(self.sp_model ) + self.offset
def get_vocab( self ) -> Dict[str, int]:
    vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__( self , d ) -> None:
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self , 'sp_model_kwargs' ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(self.vocab_file )
def _tokenize( self , text ) -> List[str]:
    return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self , token ) -> int:
    if token in self.decoder:
        return self.decoder[token]
    elif token in self.added_tokens_decoder:
        return self.added_tokens_decoder[token]
    sp_id = self.sp_model.piece_to_id(token )
    return sp_id + self.offset
def _convert_id_to_token( self , index ) -> str:
    if index in self.encoder:
        return self.encoder[index]
    elif index in self.added_tokens_encoder:
        return self.added_tokens_encoder[index]
    else:
        token = self.sp_model.IdToPiece(index - self.offset )
    return token
def convert_tokens_to_string( self , tokens ) -> str:
    current_sub_tokens = []
    out_string = ''
    for token in tokens:
        # make sure that special tokens are not decoded using sentencepiece model
        if token in self.all_special_tokens:
            out_string += self.sp_model.decode(current_sub_tokens ) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token )
    out_string += self.sp_model.decode(current_sub_tokens )
    return out_string.strip()
def num_special_tokens_to_add( self , pair=False ) -> int:
    return 1
def _special_token_mask( self , seq ) -> List[int]:
    all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
    all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
    return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
    if already_has_special_tokens:
        return self._special_token_mask(token_ids_0 )
    elif token_ids_1 is None:
        return self._special_token_mask(token_ids_0 ) + [1]
    else:
        return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
    if token_ids_1 is None:
        return token_ids_0 + [self.eos_token_id]
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return token_ids_0 + token_ids_1 + [self.eos_token_id]
def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
    if not os.path.isdir(save_directory ):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (out_vocab_file,)
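# Illustrative note on the id layout built in the constructor above, assuming the default
# offset of 103: ids 0 through 104 are reserved (0=<pad>, 1=</s>, 2=<mask_1>, 3=<mask_2>,
# 4..104=<unk_2>..<unk_102>), and a raw SentencePiece piece id `p` maps to tokenizer id
# `p + offset`, exactly as `_convert_token_to_id` does.
def _sp_to_tokenizer_id(sp_id: int, offset: int = 103) -> int:
    return sp_id + offset
# sp id 2 is <unk> in the underlying sp model, so the tokenizer's unk id is 105
assert _sp_to_tokenizer_id(2) == 105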
| 373 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
_import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mobilevit'] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_mobilevit'] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
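# Illustrative standalone analogue of the lazy-import pattern above (hypothetical names,
# independent of transformers' actual _LazyModule internals): attribute access triggers the
# import of the defining submodule only on first use.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        # import the defining submodule lazily, then forward the attribute lookup
        module = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)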
| 214 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : List[str] = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class _a ( PretrainedConfig):
"""simple docstring"""
model_type = "roberta-prelayernorm"
def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
    super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.intermediate_size = intermediate_size
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.max_position_embeddings = max_position_embeddings
    self.type_vocab_size = type_vocab_size
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.position_embedding_type = position_embedding_type
    self.use_cache = use_cache
    self.classifier_dropout = classifier_dropout
class _a ( OnnxConfig):
"""simple docstring"""
@property
def inputs( self )->Mapping[str, Mapping[int, str]]:
    if self.task == "multiple-choice":
        dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
    else:
        dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 712 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key( k , patterns ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
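# Illustrative trace of rename_state_dict_key on a hypothetical TF key (not taken from a real
# checkpoint): INIT_COMMON first rewrites separators and parameter names, then the decoder
# pattern list maps the attention projections.
# 'encdec/key/kernel' -> 'encdec.key.kernel'    ('/' -> '.')
#                     -> 'encdec.key.weight'    ('kernel' -> 'weight')
#                     -> 'encdec.k_proj.weight' ('key' -> 'k_proj' from DECODER_PATTERNS)
assert rename_state_dict_key('encdec/key/kernel', DECODER_PATTERNS) == 'encdec.k_proj.weight'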
def convert_bigbird_pegasus( tf_weights : dict , config_update : dict ):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
    conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
    if any(conditions ):
        continue
    patterns = DECODER_PATTERNS
    new_k = rename_state_dict_key(k , patterns )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
    if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
        v = v.T
    mapping[new_k] = torch.from_numpy(v )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
    conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
    if any(conditions ):
        continue
    patterns = REMAINING_PATTERNS
    new_k = rename_state_dict_key(k , patterns )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
    if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
        v = v.T
    mapping[new_k] = torch.from_numpy(v )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight' )
missing, extra = torch_model.load_state_dict(mapping , strict=False )
unexpected_missing = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def get_tf_weights_as_numpy( path ):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path : str , save_dir : str , config_update : dict ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
args = parser.parse_args()
config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
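# Illustrative direct invocation, bypassing argparse; the checkpoint and output paths are
# hypothetical placeholders.
# convert_bigbird_pegasus_ckpt_to_pytorch(
#     '/path/to/bigbird_pegasus_tf_ckpt', '/path/to/output_dir', config_update={}
# )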
| 95 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def setUp( self ):
    super().setUp()
    # We have a SentencePiece fixture for testing
    tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
    tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _large_tokenizer( self ) -> PegasusTokenizer:
    return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def get_tokenizer( self , **kwargs ) -> PegasusTokenizer:
    return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ):
    return ("This is a test", "This is a test")
def test_convert_token_and_id( self ):
    token = "</s>"
    token_id = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab( self ):
    vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
    self.assertEqual(vocab_keys[0] , "<pad>" )
    self.assertEqual(vocab_keys[1] , "</s>" )
    self.assertEqual(vocab_keys[-1] , "v" )
    self.assertEqual(len(vocab_keys ) , 1103 )
def test_vocab_size( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def test_mask_tokens_rust_pegasus( self ):
    rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
    py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
    raw_input_str = (
        "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
        " </s> <pad> <pad> <pad>"
    )
    rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
    py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
    self.assertListEqual(py_ids , rust_ids )
def test_large_mask_tokens( self ):
    tokenizer = self._large_tokenizer
    # <mask_1> masks whole sentence while <mask_2> masks single word
    raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
    desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
    ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
    self.assertListEqual(desired_result , ids )
def test_large_tokenizer_settings( self ):
    tokenizer = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
raw_input_str = "To ensure a smooth flow of bank resolutions."
desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def test_large_seq2seq_truncation( self ):
    src_texts = ["This is going to be way too long." * 150, "short example"]
    tgt_texts = ["not super long but more than 5 tokens", "tiny"]
    batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="pt" )
    targets = self._large_tokenizer(
        text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="pt" )
    assert batch.input_ids.shape == (2, 1024)
    assert batch.attention_mask.shape == (2, 1024)
    assert targets["input_ids"].shape == (2, 5)
    assert len(batch ) == 2  # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[int]:
# fmt: off
_A : Tuple = {"""input_ids""": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
_a = PegasusTokenizer
_a = PegasusTokenizerFast
_a = True
_a = True
def setUp( self ):
    super().setUp()
    # We have a SentencePiece fixture for testing
    tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="[MASK]" )
    tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _large_tokenizer( self ) -> PegasusTokenizer:
    return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def get_tokenizer( self , **kwargs ) -> PegasusTokenizer:
    return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ):
    return ("This is a test", "This is a test")
def test_mask_tokens_rust_pegasus( self ):
    rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
    py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
    raw_input_str = (
        "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
        " <pad> <pad> <pad>"
    )
    rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
    py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
    self.assertListEqual(py_ids , rust_ids )
@require_torch
def test_large_seq2seq_truncation( self ):
    src_texts = ["This is going to be way too long." * 1000, "short example"]
    tgt_texts = ["not super long but more than 5 tokens", "tiny"]
    batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="pt" )
    targets = self._large_tokenizer(
        text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="pt" )
    assert batch.input_ids.shape == (2, 4096)
    assert batch.attention_mask.shape == (2, 4096)
    assert targets["input_ids"].shape == (2, 5)
    assert len(batch ) == 2  # input_ids, attention_mask.
def test_equivalence_to_orig_tokenizer( self ):
    tgt_text = (
        "This is an example string that is used to test the original TF implementation against the HF"
        " implementation"
    )
    token_ids = self._large_tokenizer(tgt_text ).input_ids
    self.assertListEqual(
        token_ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 307 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = KandinskyVaaControlnetImgaImgPipeline
params = ["image_embeds", "negative_image_embeds", "image", "hint"]
batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
required_optional_params = [
    "generator",
    "height",
    "width",
    "strength",
    "guidance_scale",
    "num_inference_steps",
    "num_images_per_prompt",
    "output_type",
    "return_dict",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self ):
return 32
@property
def time_input_dim( self ):
return 32
@property
def block_out_channels_a( self ):
return self.time_input_dim
@property
def time_embed_dim( self ):
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
return 100
@property
def dummy_unet( self ):
torch.manual_seed(0 )
model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
model = UNetaDConditionModel(**model_kwargs )
return model
@property
def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def dummy_movq( self ):
    torch.manual_seed(0 )
    model = VQModel(**self.dummy_movq_kwargs )
    return model
def get_dummy_components( self ):
    unet = self.dummy_unet
    movq = self.dummy_movq
    ddim_config = {
        "num_train_timesteps": 1000,
        "beta_schedule": "linear",
        "beta_start": 0.00085,
        "beta_end": 0.012,
        "clip_sample": False,
        "set_alpha_to_one": False,
        "steps_offset": 0,
        "prediction_type": "epsilon",
        "thresholding": False,
    }
    scheduler = DDIMScheduler(**ddim_config )
    components = {
        "unet": unet,
        "scheduler": scheduler,
        "movq": movq,
    }
    return components
def get_dummy_inputs( self , device , seed=0 ):
    image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
    negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
        device )
    # create init_image
    image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
    image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
    init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
    # create hint
    hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
    if str(device ).startswith("mps" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        "image": init_image,
        "image_embeds": image_embeds,
        "negative_image_embeds": negative_image_embeds,
        "hint": hint,
        "generator": generator,
        "height": 64,
        "width": 64,
        "num_inference_steps": 10,
        "guidance_scale": 7.0,
        "strength": 0.2,
        "output_type": "np",
    }
    return inputs
def test_kandinsky_controlnet_img2img( self ):
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe = pipe.to(device )
    pipe.set_progress_bar_config(disable=None )
    output = pipe(**self.get_dummy_inputs(device ) )
    image = output.images
    image_from_tuple = pipe(
        **self.get_dummy_inputs(device ) , return_dict=False , )[0]
    image_slice = image[0, -3:, -3:, -1]
    image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
    assert image.shape == (1, 64, 64, 3)
    expected_slice = np.array(
        [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
    assert (
        np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
    assert (
        np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_controlnet_img2img( self ):
    expected_image = load_numpy(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
    init_image = init_image.resize((512, 512) )
    hint = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/kandinskyv22/hint_image_cat.png" )
    hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
    hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
    prompt = "A robot, 4k photo"
    pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
    pipe_prior.to(torch_device )
    pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16 )
    pipeline = pipeline.to(torch_device )
    pipeline.set_progress_bar_config(disable=None )
    generator = torch.Generator(device="cpu" ).manual_seed(0 )
    image_emb, zero_image_emb = pipe_prior(
        prompt , image=init_image , strength=0.85 , generator=generator , negative_prompt="" , ).to_tuple()
    output = pipeline(
        image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
    image = output.images[0]
    assert image.shape == (512, 512, 3)
    assert_mean_pixel_difference(image , expected_image )
| 307 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( ProcessorMixin):
'''simple docstring'''
attributes = ["image_processor", "tokenizer"]
image_processor_class = "BlipImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__( self , image_processor , tokenizer ):
    tokenizer.return_token_type_ids = False
    super().__init__(image_processor , tokenizer )
    self.current_processor = self.image_processor
def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
    if images is None and text is None:
        raise ValueError('You have to specify either images or text.' )
    # Get only text
    if images is None:
        self.current_processor = self.tokenizer
        text_encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        return text_encoding
    # add pixel_values
    encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
    if text is not None:
        text_encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
    else:
        text_encoding = None
    if text_encoding is not None:
        encoding_image_processor.update(text_encoding )
    return encoding_image_processor
def batch_decode( self , *args , **kwargs ):
    return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ):
    return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def model_input_names( self ):
    tokenizer_input_names = self.tokenizer.model_input_names
    image_processor_input_names = self.image_processor.model_input_names
    return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
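# Usage sketch (illustrative, kept commented out; the checkpoint id and image path are
# hypothetical placeholders). This mirrors the standard ProcessorMixin calling convention:
#
# from PIL import Image
# from transformers import AutoProcessor
#
# processor = AutoProcessor.from_pretrained("some/blip2-style-checkpoint")
# inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
# # `inputs` holds pixel_values from the image processor plus input_ids / attention_mask
# # from the tokenizer, merged by the `update` call above.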
| 711 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class __magic_name__ ( PretrainedConfig):
'''simple docstring'''
model_type = "realm"
def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , retriever_proj_size=1_28 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=30_72 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=2_56 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=3_20 , num_block_records=13_35_37_18 , searcher_beam_size=50_00 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
    super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    # Common config
    self.vocab_size = vocab_size
    self.max_position_embeddings = max_position_embeddings
    self.hidden_size = hidden_size
    self.retriever_proj_size = retriever_proj_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.num_candidates = num_candidates
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.initializer_range = initializer_range
    self.type_vocab_size = type_vocab_size
    self.layer_norm_eps = layer_norm_eps
    # Reader config
    self.span_hidden_size = span_hidden_size
    self.max_span_width = max_span_width
    self.reader_layer_norm_eps = reader_layer_norm_eps
    self.reader_beam_size = reader_beam_size
    self.reader_seq_len = reader_seq_len
    # Retrieval config
    self.num_block_records = num_block_records
    self.searcher_beam_size = searcher_beam_size
| 89 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int) -> np.ndarray:
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 1_80 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
img = imread("../image_data/lena.jpg")
# turn the image into gray-scale values
gray = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple kernels to detect edges
out = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
    kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
    out += filter2D(gray, CV_8UC3, kernel_10)
out = out / out.max() * 255
out = out.astype(np.uint8)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 198 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.31_44_62  # Unit - J mol-1 K-1
def pressure_of_gas_system( moles: float , kelvin: float , volume: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles: float , kelvin: float , pressure: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
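# Worked example, assuming the functions above: 1 mol of an ideal gas at 300 K in 1 m^3
# exerts P = nRT / V = 1 * 300 * 8.314462 / 1, i.e. roughly 2494.34 Pa.
assert abs(pressure_of_gas_system(1, 300, 1) - 2494.3386) < 1e-3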
| 341 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)
@classmethod
def create(cls ) -> "KarrasVeSchedulerState":
    return cls()
@dataclass
class FlaxKarrasVeOutput( BaseOutput ):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class lowerCamelCase ( FlaxSchedulerMixin , ConfigMixin ):
@property
def has_state(self ) -> bool:
    return True
@register_to_config
def __init__(self , sigma_min : float = 0.02 , sigma_max : float = 1_0_0 , s_noise : float = 1.0_07 , s_churn : float = 8_0 , s_min : float = 0.05 , s_max : float = 5_0 , ) -> None:
    pass
def create_state(self ) -> KarrasVeSchedulerState:
    return KarrasVeSchedulerState.create()
def set_timesteps(self , state : KarrasVeSchedulerState , num_inference_steps : int , shape : Tuple = () ) -> KarrasVeSchedulerState:
    timesteps = jnp.arange(0 , num_inference_steps )[::-1].copy()
    schedule = [
        (
            self.config.sigma_max**2
            * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
        )
        for i in timesteps
    ]
    return state.replace(
        num_inference_steps=num_inference_steps , schedule=jnp.array(schedule , dtype=jnp.float32 ) , timesteps=timesteps , )
def add_noise_to_input(self , state : KarrasVeSchedulerState , sample : jnp.ndarray , sigma : float , key : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
    if self.config.s_min <= sigma <= self.config.s_max:
        gamma = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
    else:
        gamma = 0
    # sample eps ~ N(0, S_noise^2 * I)
    key = random.split(key , num=1 )
    eps = self.config.s_noise * random.normal(key=key , shape=sample.shape )
    sigma_hat = sigma + gamma * sigma
    sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
    return sample_hat, sigma_hat
def step(self , state : KarrasVeSchedulerState , model_output : jnp.ndarray , sigma_hat : float , sigma_prev : float , sample_hat : jnp.ndarray , return_dict : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
    pred_original_sample = sample_hat + sigma_hat * model_output
    derivative = (sample_hat - pred_original_sample) / sigma_hat
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
    if not return_dict:
        return (sample_prev, derivative, state)
    return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
def step_correct(self , state : KarrasVeSchedulerState , model_output : jnp.ndarray , sigma_hat : float , sigma_prev : float , sample_hat : jnp.ndarray , sample_prev : jnp.ndarray , derivative : jnp.ndarray , return_dict : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
    pred_original_sample = sample_prev + sigma_prev * model_output
    derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
    if not return_dict:
        return (sample_prev, derivative, state)
    return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
def add_noise(self , state : KarrasVeSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray ) -> jnp.ndarray:
    raise NotImplementedError()
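# Illustrative standalone computation of the geometric sigma schedule built in set_timesteps
# above (values only; no scheduler object is constructed). With the default sigma_min=0.02
# and sigma_max=100, the interpolation endpoints are sigma_max**2 and sigma_min**2.
def _karras_sigma_sq(i: int, n: int, sigma_min: float = 0.02, sigma_max: float = 100.0) -> float:
    return sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1))

assert _karras_sigma_sq(0, 3) == 100.0**2
assert abs(_karras_sigma_sq(2, 3) - 0.02**2) < 1e-12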
| 294 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence: list ) -> list:
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
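# Illustrative run, assuming the implementation above: a shuffled tricolor sequence is
# partitioned in a single pass.
assert dutch_national_flag_sort([2, 0, 1, 0, 2]) == [0, 0, 1, 2, 2]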
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input("Enter numbers separated by commas:\n").strip()
unsorted = [int(item.strip()) for item in user_input.split(",")]
print(f"{dutch_national_flag_sort(unsorted)}")
| 294 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file( config_path ):
    """simple docstring"""
    print('Loading config file...' )
    def flatten_yaml_as_dict( d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_path , 'r' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_path , str(exc ) ) )
    return config
def get_mobilevitva_config( task_name , orig_cfg_file ):
    """simple docstring"""
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('imagenet1k_' ):
        config.num_labels = 1_000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_' ):
        config.num_labels = 21_000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_' ):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_' ):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , 'model.classification.mitv2.width_multiplier' , 1.0 )
    assert (
        getattr(orig_config , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d are not supported"
    config.hidden_act = getattr(orig_config , 'model.classification.activation.name' , 'swish' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , 'model.segmentation.output_stride' , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
            config.aspp_dropout_prob = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
    # id2label
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict , base_model=False ):
    """simple docstring"""
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            k_new = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            k_new = k_new.replace('.norm.' , '.normalization.' )
        if "conv_1." in k:
            k_new = k_new.replace('conv_1.' , f"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.' , 'classifier.' )
        if "seg_head." in k:
            k_new = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.' , '.' )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys( state_dict ):
    """simple docstring"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    """simple docstring"""
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys in the original checkpoint
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith('imagenet' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCamelCase : int = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 87 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
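# torch.hub reads the module-level `dependencies` list above. The wrappers below
# are hubconf-style entry points: each one re-exports an Auto* factory with its
# original docstring attached via `add_start_docstrings` (the entry-point names
# here follow transformers' published hubconf convention).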
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModel.__doc__ )
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
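# Usage sketch via torch.hub (repo spec and checkpoint name are illustrative):
#   tok = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-uncased')
#   mdl = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-uncased')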
| 366 | 0 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
a__ : Union[str, Any] ='''sshleifer/mar_enro_6_3_student'''
class snake_case ( _UpperCamelCase ):
"""simple docstring"""
def _lowerCamelCase ( self : List[str] ):
super().setUp()
__UpperCamelCase = cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__a , )
__UpperCamelCase = f'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def _lowerCamelCase ( self : Dict ):
MarianMTModel.from_pretrained(__a )
@slow
@require_torch_gpu
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = {
"$MAX_LEN": 6_4,
"$BS": 6_4,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
__UpperCamelCase = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split('finetune.py' )[1].strip()
__UpperCamelCase = bash_script.replace('\\\n' , '' ).strip().replace('\"$@\"' , '' )
for k, v in env_vars_to_replace.items():
            __UpperCamelCase = bash_script.replace(k , str(v ) )
__UpperCamelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__UpperCamelCase = f'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__UpperCamelCase = ["finetune.py"] + bash_script.split() + args
with patch.object(__a , 'argv' , __a ):
__UpperCamelCase = argparse.ArgumentParser()
__UpperCamelCase = pl.Trainer.add_argparse_args(__a )
__UpperCamelCase = SummarizationModule.add_model_specific_args(__a , os.getcwd() )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = main(__a )
# Check metrics
__UpperCamelCase = load_json(model.metrics_save_path )
__UpperCamelCase = metrics["val"][0]
__UpperCamelCase = metrics["val"][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] , float )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
        # fails if the model hangs on generate, e.g. because a bad config was saved
        self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 1_7 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__UpperCamelCase = os.listdir(__a )
__UpperCamelCase = [x for x in contents if x.endswith('.ckpt' )][0]
__UpperCamelCase = os.path.join(args.output_dir , __a )
__UpperCamelCase = torch.load(__a , map_location='cpu' )
__UpperCamelCase = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            __UpperCamelCase = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class snake_case ( _UpperCamelCase ):
"""simple docstring"""
@timeout_decorator.timeout(6_0_0 )
@slow
@require_torch_gpu
def _lowerCamelCase ( self : str ):
__UpperCamelCase = f'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
__UpperCamelCase = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 1_2_8,
"$BS": 1_6,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
__UpperCamelCase = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split('distillation.py' )[1].strip()
)
__UpperCamelCase = bash_script.replace('\\\n' , '' ).strip().replace('\"$@\"' , '' )
__UpperCamelCase = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
            __UpperCamelCase = bash_script.replace(k , str(v ) )
__UpperCamelCase = self.get_auto_remove_tmp_dir()
__UpperCamelCase = bash_script.replace('--fp16' , '' )
__UpperCamelCase = 6
__UpperCamelCase = (
["distillation.py"]
+ bash_script.split()
+ [
f'''--output_dir={output_dir}''',
"--gpus=1",
"--learning_rate=1e-3",
f'''--num_train_epochs={epochs}''',
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(__a , 'argv' , __a ):
__UpperCamelCase = argparse.ArgumentParser()
__UpperCamelCase = pl.Trainer.add_argparse_args(__a )
__UpperCamelCase = SummarizationDistiller.add_model_specific_args(__a , os.getcwd() )
__UpperCamelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__UpperCamelCase = distill_main(__a )
# Check metrics
__UpperCamelCase = load_json(model.metrics_save_path )
__UpperCamelCase = metrics["val"][0]
__UpperCamelCase = metrics["val"][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
__UpperCamelCase = os.listdir(__a )
__UpperCamelCase = [x for x in contents if x.endswith('.ckpt' )][0]
__UpperCamelCase = os.path.join(args.output_dir , __a )
__UpperCamelCase = torch.load(__a , map_location='cpu' )
__UpperCamelCase = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            __UpperCamelCase = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 707 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """simple docstring"""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def test_split_info_asdict(split_info: SplitInfo):
    """simple docstring"""
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 434 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 688 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = LxmertTokenizer
__UpperCamelCase = LxmertTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def __lowerCAmelCase ( self : str ) -> str:
'''simple docstring'''
super().setUp()
a__ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : int , A__ : int ) -> int:
'''simple docstring'''
a__ : List[Any] = '''UNwant\u00E9d,running'''
a__ : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = self.tokenizer_class(self.vocab_file )
a__ : List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [7, 4, 5, 1_0, 8, 9] )
def __lowerCAmelCase ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a__ : Union[str, Any] = self.get_tokenizer()
a__ : Union[str, Any] = self.get_rust_tokenizer()
a__ : str = '''I was born in 92000, and this is falsé.'''
a__ : Tuple = tokenizer.tokenize(A__ )
a__ : Tuple = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
a__ : Optional[int] = tokenizer.encode(A__ , add_special_tokens=A__ )
a__ : Optional[Any] = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
a__ : List[str] = self.get_rust_tokenizer()
a__ : str = tokenizer.encode(A__ )
a__ : int = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
| 688 | 1 |
'''simple docstring'''
__lowerCamelCase : Any = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__lowerCamelCase : str = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__lowerCamelCase : List[str] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__lowerCamelCase : Union[str, Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__lowerCamelCase : str = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__lowerCamelCase : Optional[int] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__lowerCamelCase : Optional[Any] = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__lowerCamelCase : Union[str, Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 700 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
"""simple docstring"""
snake_case_ : str = False
snake_case_ : int = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
snake_case_ : Any = True
elif "IPython" in sys.modules:
snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
snake_case_ : Any = PrecisionType(mixed_precision.lower() )
except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
snake_case_ : Tuple = 8
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*__magic_name__ )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case_ : Any = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*__magic_name__ )
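# Minimal usage sketch from a notebook (the training function is illustrative):
#   def training_loop():
#       ...  # build the model, optimizer and data, then train
#   notebook_launcher(training_loop, num_processes=2)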
def debug_launcher( function , args=() , num_processes=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
| 656 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _snake_case :
def __init__( self : Optional[int], __lowercase : Optional[int], __lowercase : List[Any]=3, __lowercase : List[str]=32, __lowercase : Dict=3, __lowercase : int=10, __lowercase : List[Any]=[10, 20, 30, 40], __lowercase : List[str]=[1, 1, 2, 1], __lowercase : Dict=True, __lowercase : List[Any]=True, __lowercase : List[str]="relu", __lowercase : Any=3, __lowercase : int=None, ):
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = embeddings_size
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = scope
lowercase__ = len(__lowercase )
def A__ ( self : List[str] ):
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self : List[str] ):
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def A__ ( self : Tuple, __lowercase : Optional[int], __lowercase : List[str], __lowercase : Optional[Any] ):
lowercase__ = TFRegNetModel(config=__lowercase )
lowercase__ = model(__lowercase, training=__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def A__ ( self : Optional[int], __lowercase : Tuple, __lowercase : int, __lowercase : List[str] ):
lowercase__ = self.num_labels
lowercase__ = TFRegNetForImageClassification(__lowercase )
lowercase__ = model(__lowercase, labels=__lowercase, training=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def A__ ( self : Dict ):
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
UpperCamelCase__ : int =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCamelCase__ : str =(
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ : Tuple =False
UpperCamelCase__ : List[Any] =False
UpperCamelCase__ : Optional[Any] =False
UpperCamelCase__ : Optional[Any] =False
UpperCamelCase__ : List[str] =False
def A__ ( self : Dict ):
lowercase__ = TFRegNetModelTester(self )
lowercase__ = ConfigTester(self, config_class=__lowercase, has_text_modality=__lowercase )
def A__ ( self : Any ):
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def A__ ( self : int ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
@slow
def A__ ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def A__ ( self : List[Any] ):
pass
def A__ ( self : Dict ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], __lowercase )
def A__ ( self : List[str] ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def A__ ( self : Optional[Any] ):
def check_hidden_states_output(__lowercase : Tuple, __lowercase : str, __lowercase : Dict ):
lowercase__ = model_class(__lowercase )
lowercase__ = model(**self._prepare_for_class(__lowercase, __lowercase ), training=__lowercase )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(__lowercase ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ = layer_type
lowercase__ = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
def A__ ( self : List[str] ):
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__lowercase : Optional[int], __lowercase : Optional[int], __lowercase : str, __lowercase : List[str]={} ):
lowercase__ = model(__lowercase, return_dict=__lowercase, **__lowercase )
lowercase__ = model(__lowercase, return_dict=__lowercase, **__lowercase ).to_tuple()
def recursive_check(__lowercase : List[str], __lowercase : List[str] ):
if isinstance(__lowercase, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowercase, __lowercase ):
recursive_check(__lowercase, __lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowercase, __lowercase ) ), msg=(
"Tuple and dict output are not equal. Difference:"
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
), )
recursive_check(__lowercase, __lowercase )
for model_class in self.all_model_classes:
lowercase__ = model_class(__lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase )
check_equivalence(__lowercase, __lowercase, __lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
check_equivalence(__lowercase, __lowercase, __lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase )
check_equivalence(__lowercase, __lowercase, __lowercase, {"output_hidden_states": True} )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
lowercase__ = self._prepare_for_class(__lowercase, __lowercase, return_labels=__lowercase )
check_equivalence(__lowercase, __lowercase, __lowercase, {"output_hidden_states": True} )
def A__ ( self : Any ):
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def A__ ( self : Dict ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFRegNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def __lowerCAmelCase ( ):
lowercase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _snake_case ( unittest.TestCase):
@cached_property
def A__ ( self : int ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A__ ( self : List[Any] ):
lowercase__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=__lowercase, return_tensors="tf" )
# forward pass
lowercase__ = model(**__lowercase, training=__lowercase )
# verify the logits
lowercase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, __lowercase )
lowercase__ = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3], __lowercase, atol=1e-4 )
| 413 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class _snake_case ( lowercase__):
UpperCamelCase__ : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : int ="""ml.p3.2xlarge"""
UpperCamelCase__ : Any ="""accelerate_sagemaker_execution_role"""
UpperCamelCase__ : Dict ="""hf-sm"""
UpperCamelCase__ : Optional[int] ="""us-east-1"""
UpperCamelCase__ : Optional[Any] =1
UpperCamelCase__ : int ="""accelerate-sagemaker-1"""
UpperCamelCase__ : Union[str, Any] ="""1.6"""
UpperCamelCase__ : str ="""4.4"""
UpperCamelCase__ : str ="""train.py"""
UpperCamelCase__ : Union[str, Any] =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""False""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
UpperCamelCase__ : str =[
"""--model_name_or_path""",
"""bert""",
"""--do_train""",
"""--do_test""",
"""False""",
"""--do_predict""",
"""--epochs""",
"""3""",
"""--learning_rate""",
"""5e-5""",
"""--max_steps""",
"""50.5""",
]
class _snake_case ( unittest.TestCase):
def A__ ( self : Optional[int] ):
        # `_convert_nargs_to_dict` should cast the raw CLI strings to their proper python types.
lowercase__ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"], __lowercase )
assert isinstance(converted_args["do_train"], __lowercase )
assert isinstance(converted_args["epochs"], __lowercase )
assert isinstance(converted_args["learning_rate"], __lowercase )
assert isinstance(converted_args["max_steps"], __lowercase )
with pytest.raises(__lowercase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 413 | 1 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
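# Simulated annealing in brief: always accept a neighbor that improves the score,
# and accept a worsening one with probability e^(change / T). The temperature T
# decays geometrically each iteration until it falls below `threshold_temp` or
# no in-bounds neighbor remains.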
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    '''simple docstring'''
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel("""Iterations""")
        plt.ylabel("""Function values""")
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa(x, y):
        '''simple docstring'''
        return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase : int = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase : Any = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
UpperCAmelCase : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase : int = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
    def test_fa(x, y):
        '''simple docstring'''
        return (3 * x**2) - (6 * y)
UpperCAmelCase : Optional[int] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
F"{local_min.score()}"
)
UpperCAmelCase : List[Any] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase : Tuple = simulated_annealing(prob, find_max=True, visualization=True)
print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
F"{local_min.score()}"
)
| 299 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(A )
class lowerCamelCase__ ( A ):
"""simple docstring"""
def __init__( self : List[Any] , **UpperCamelCase : int ):
'''simple docstring'''
super().__init__(**UpperCamelCase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(UpperCamelCase )
def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : Union[str, Any] = {}
__UpperCAmelCase : str = {}
# preprocess args
if "points_per_batch" in kwargs:
__UpperCAmelCase : Optional[Any] = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
__UpperCAmelCase : Optional[Any] = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
__UpperCAmelCase : str = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
__UpperCAmelCase : Any = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
__UpperCAmelCase : Any = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
__UpperCAmelCase : Dict = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
__UpperCAmelCase : Union[str, Any] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
__UpperCAmelCase : Dict = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
__UpperCAmelCase : int = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
__UpperCAmelCase : Union[str, Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
__UpperCAmelCase : Optional[Any] = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
__UpperCAmelCase : str = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
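        # Per the transformers Pipeline contract, these three dicts are routed to
        # preprocess(), _forward() and postprocess() respectively.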
def __call__( self : str , UpperCamelCase : Optional[int] , *UpperCamelCase : int , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=None , **UpperCamelCase : str ):
'''simple docstring'''
return super().__call__(UpperCamelCase , *UpperCamelCase , num_workers=UpperCamelCase , batch_size=UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int]=64 , UpperCamelCase : int = 0 , UpperCamelCase : float = 512 / 1_500 , UpperCamelCase : Optional[int] = 32 , UpperCamelCase : Optional[int] = 1 , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = load_image(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = self.image_processor.size["""longest_edge"""]
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = self.image_processor.generate_crop_boxes(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Any = self.image_processor(images=UpperCamelCase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
__UpperCAmelCase : Union[str, Any] = self.get_inference_context()
with inference_context():
__UpperCAmelCase : Tuple = self._ensure_tensor_on_device(UpperCamelCase , device=self.device )
__UpperCAmelCase : List[str] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
__UpperCAmelCase : Optional[int] = image_embeddings
__UpperCAmelCase : List[str] = grid_points.shape[1]
__UpperCAmelCase : Optional[Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCamelCase , UpperCamelCase ):
__UpperCAmelCase : str = grid_points[:, i : i + points_per_batch, :, :]
__UpperCAmelCase : Union[str, Any] = input_labels[:, i : i + points_per_batch]
__UpperCAmelCase : Optional[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any]=0.88 , UpperCamelCase : Union[str, Any]=0.95 , UpperCamelCase : int=0 , UpperCamelCase : Optional[int]=1 , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = model_inputs.pop("""input_boxes""" )
__UpperCAmelCase : Tuple = model_inputs.pop("""is_last""" )
__UpperCAmelCase : List[Any] = model_inputs.pop("""original_sizes""" ).tolist()
__UpperCAmelCase : Optional[int] = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
__UpperCAmelCase : str = self.model(**UpperCamelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
__UpperCAmelCase : Dict = model_outputs["""pred_masks"""]
__UpperCAmelCase : Union[str, Any] = self.image_processor.post_process_masks(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , binarize=UpperCamelCase )
__UpperCAmelCase : Optional[int] = model_outputs["""iou_scores"""]
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str]=False , UpperCamelCase : int=False , UpperCamelCase : int=0.7 , ):
'''simple docstring'''
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : int = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
__UpperCAmelCase : List[Any] = torch.cat(UpperCamelCase )
__UpperCAmelCase : List[str] = torch.cat(UpperCamelCase )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = self.image_processor.post_process_for_mask_generation(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : List[Any] = defaultdict(UpperCamelCase )
for output in model_outputs:
for k, v in output.items():
                extra[k].append(v )
__UpperCAmelCase : int = {}
if output_rle_mask:
__UpperCAmelCase : Union[str, Any] = rle_mask
if output_bboxes_mask:
__UpperCAmelCase : int = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 299 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :str = TextToVideoSDPipeline
__magic_name__ :Dict = TEXT_TO_IMAGE_PARAMS
__magic_name__ :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__magic_name__ :Optional[int] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :List[str] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
lowerCAmelCase__ :int = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
lowerCAmelCase__ :List[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCAmelCase__ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
lowerCAmelCase__ :str = CLIPTextModel(_a )
lowerCAmelCase__ :str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase__ :Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(_a ).startswith('mps' ):
lowerCAmelCase__ :List[str] = torch.manual_seed(_a )
else:
lowerCAmelCase__ :Dict = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase__ :Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ :List[str] = self.get_dummy_components()
lowerCAmelCase__ :Optional[Any] = TextToVideoSDPipeline(**_a )
lowerCAmelCase__ :Union[str, Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase__ :List[str] = self.get_dummy_inputs(_a )
lowerCAmelCase__ :Optional[Any] = """np"""
lowerCAmelCase__ :Optional[Any] = sd_pipe(**_a ).frames
lowerCAmelCase__ :Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
lowerCAmelCase__ :Any = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def snake_case ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
lowerCAmelCase__ :int = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
lowerCAmelCase__ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase__ :List[Any] = pipe.to('cuda' )
lowerCAmelCase__ :Any = """Spiderman is surfing"""
lowerCAmelCase__ :List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ :Optional[Any] = pipe(_a , generator=_a , num_inference_steps=2_5 , output_type='pt' ).frames
lowerCAmelCase__ :Optional[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
lowerCAmelCase__ :int = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
lowerCAmelCase__ :Optional[int] = pipe.to('cuda' )
lowerCAmelCase__ :str = """Spiderman is surfing"""
lowerCAmelCase__ :Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCAmelCase__ :Dict = pipe(_a , generator=_a , num_inference_steps=2 , output_type='pt' ).frames
lowerCAmelCase__ :Tuple = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 93 |
import torch
from diffusers import StableDiffusionPipeline
_snake_case = "path-to-your-trained-model"
_snake_case = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
_snake_case = "A photo of sks dog in a bucket"
_snake_case = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 307 | 0 |
def naive_pattern_search(s: str, pattern: str):
    '''simple docstring'''
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
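# The naive scan costs O(len(s) * len(pattern)) comparisons in the worst case;
# Knuth-Morris-Pratt or Boyer-Moore would be the usual upgrades for long inputs.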
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
| 714 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = AutoencoderKL
lowerCAmelCase__ = "sample"
lowerCAmelCase__ = 1E-2
@property
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = 4
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
return {"sample": image}
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
@property
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return (3, 32, 32)
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
lowercase_ = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ) -> Any:
'''simple docstring'''
pass
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.prepare_init_args_and_inputs_for_common()
lowercase_ = self.model_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
assert not model.is_gradient_checkpointing and model.training
lowercase_ = model(**UpperCAmelCase ).sample
        # run the backwards pass on the model. For simplicity, instead of a real loss
        # we backprop on the mean difference to a random target
model.zero_grad()
lowercase_ = torch.randn_like(UpperCAmelCase )
lowercase_ = (out - labels).mean()
loss.backward()
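        # Gradient checkpointing recomputes activations during the backward pass
        # instead of caching them, trading compute for memory; the loss and gradients
        # should therefore match the vanilla run within the tolerances asserted below.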
# re-instantiate the model now enabling gradient checkpointing
lowercase_ = self.model_class(**UpperCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(UpperCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowercase_ = model_a(**UpperCAmelCase ).sample
        # run the backward pass; for simplicity we skip a real loss and instead
        # backprop on the mean difference to the same random target
model_a.zero_grad()
lowercase_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowercase_ = dict(model.named_parameters() )
lowercase_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase )
lowercase_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
lowercase_ = model.to(UpperCAmelCase )
model.eval()
if torch_device == "mps":
lowercase_ = torch.manual_seed(0 )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase_ = image.to(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(UpperCAmelCase , sample_posterior=UpperCAmelCase , generator=UpperCAmelCase ).sample
lowercase_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase_ = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
lowercase_ = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
lowercase_ = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
@slow
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
        return F'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape] )}.npy'
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self , UpperCAmelCase=0 , UpperCAmelCase=(4, 3, 512, 512) , UpperCAmelCase=False ) -> str:
'''simple docstring'''
        lowercase_ = torch.float16 if fpaa else torch.float32
lowercase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) ).to(UpperCAmelCase ).to(UpperCAmelCase )
return image
def A__ ( self , UpperCAmelCase="CompVis/stable-diffusion-v1-4" , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
lowercase_ = "fp16" if fpaa else None
        lowercase_ = torch.float16 if fpaa else torch.float32
lowercase_ = AutoencoderKL.from_pretrained(
UpperCAmelCase , subfolder="vae" , torch_dtype=UpperCAmelCase , revision=UpperCAmelCase , )
model.to(UpperCAmelCase ).eval()
return model
def A__ ( self , UpperCAmelCase=0 ) -> int:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(UpperCAmelCase )
return torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase )
lowercase_ = self.get_generator(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(UpperCAmelCase , generator=UpperCAmelCase , sample_posterior=UpperCAmelCase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCAmelCase )
lowercase_ = self.get_sd_image(UpperCAmelCase , fpaa=UpperCAmelCase )
lowercase_ = self.get_generator(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(UpperCAmelCase , generator=UpperCAmelCase , sample_posterior=UpperCAmelCase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(UpperCAmelCase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase_ = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCAmelCase )
lowercase_ = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase )
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def A__ ( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCAmelCase )
lowercase_ = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase )
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase )
lowercase_ = self.get_generator(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model.encode(UpperCAmelCase ).latent_dist
lowercase_ = dist.sample(generator=UpperCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowercase_ = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase_ = torch.tensor(UpperCAmelCase )
lowercase_ = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase )
| 601 | 0 |
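The gradient-checkpointing test above reduces to one invariant: a model with checkpointing enabled must reproduce the loss and parameter gradients of an identical model without it. A minimal self-contained sketch of that invariant using stock torch.utils.checkpoint (illustrative, not the diffusers implementation):

import torch
from torch.utils.checkpoint import checkpoint


class Tiny(torch.nn.Module):
    def __init__(self, use_ckpt: bool = False):
        super().__init__()
        self.l1 = torch.nn.Linear(8, 8)
        self.l2 = torch.nn.Linear(8, 8)
        self.use_ckpt = use_ckpt

    def forward(self, x):
        h = self.l1(x)
        if self.use_ckpt:
            # recompute l2's activations during backward instead of storing them
            return checkpoint(self.l2, h, use_reentrant=False)
        return self.l2(h)


torch.manual_seed(0)
plain, ckpt = Tiny(False), Tiny(True)
ckpt.load_state_dict(plain.state_dict())  # clone the weights, like the second model in the test
x = torch.randn(4, 8)
loss_plain = plain(x).sum()
loss_plain.backward()
loss_ckpt = ckpt(x).sum()
loss_ckpt.backward()
assert torch.allclose(loss_plain, loss_ckpt, atol=1e-6)
for (name, p), (_, q) in zip(plain.named_parameters(), ckpt.named_parameters()):
    assert torch.allclose(p.grad, q.grad, atol=1e-5), name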
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(snake_case , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_attention_heads' ) )
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=32 , snake_case=2 , snake_case=3 , snake_case=640 , snake_case=4 , snake_case="silu" , snake_case=3 , snake_case=32 , snake_case=0.1 , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=True , snake_case=True , snake_case=10 , snake_case=None , ) -> Tuple:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = last_hidden_size
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = output_stride
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = classifier_dropout_prob
_UpperCAmelCase = use_labels
_UpperCAmelCase = is_training
_UpperCAmelCase = num_labels
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> Any:
_UpperCAmelCase = MobileViTModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> str:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileViTForImageClassification(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> Any:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileViTForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = MobileViTModelTester(self )
_UpperCAmelCase = MobileViTConfigTester(self , config_class=snake_case , has_text_modality=snake_case )
def lowerCamelCase_ ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowerCamelCase_ ( self ) -> Tuple:
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowerCamelCase_ ( self ) -> Any:
pass
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Optional[int]:
pass
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
def check_hidden_states_output(snake_case , snake_case , snake_case ):
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = 5
self.assertEqual(len(snake_case ) , snake_case )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_UpperCAmelCase = 2
for i in range(len(snake_case ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case )
@slow
def lowerCamelCase_ ( self ) -> Any:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = MobileViTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ) -> Any:
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(snake_case )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
_UpperCAmelCase = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_UpperCAmelCase = model.to(snake_case )
_UpperCAmelCase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , snake_case )
_UpperCAmelCase = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_UpperCAmelCase = model.to(snake_case )
_UpperCAmelCase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
_UpperCAmelCase = outputs.logits.detach().cpu()
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=snake_case , target_sizes=[(50, 60)] )
_UpperCAmelCase = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , snake_case )
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=snake_case )
_UpperCAmelCase = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , snake_case )
| 573 |
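The hidden-states check in the test above is really a statement about output stride: each stage halves the spatial resolution until the configured stride is reached. A toy version of the same bookkeeping with plain stride-2 convolutions (illustrative only, not MobileViT):

import torch

x = torch.randn(1, 3, 32, 32)
stages = torch.nn.ModuleList(
    torch.nn.Conv2d(3 if i == 0 else 8, 8, kernel_size=3, stride=2, padding=1) for i in range(5)
)
divisor = 2
for stage in stages:
    x = stage(x)
    # height and width shrink by a factor of two per stage
    assert x.shape[-1] == 32 // divisor
    divisor *= 2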
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''efficientformer'''
def __init__( self , snake_case = [3, 2, 6, 4] , snake_case = [48, 96, 224, 448] , snake_case = [True, True, True, True] , snake_case = 448 , snake_case = 32 , snake_case = 4 , snake_case = 7 , snake_case = 5 , snake_case = 8 , snake_case = 4 , snake_case = 0.0 , snake_case = 16 , snake_case = 3 , snake_case = 3 , snake_case = 3 , snake_case = 2 , snake_case = 1 , snake_case = 0.0 , snake_case = 1 , snake_case = True , snake_case = True , snake_case = 1E-5 , snake_case = "gelu" , snake_case = 0.02 , snake_case = 1E-12 , snake_case = 224 , snake_case = 1E-05 , **snake_case , ) -> None:
super().__init__(**snake_case )
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = depths
_UpperCAmelCase = mlp_expansion_ratio
_UpperCAmelCase = downsamples
_UpperCAmelCase = dim
_UpperCAmelCase = key_dim
_UpperCAmelCase = attention_ratio
_UpperCAmelCase = resolution
_UpperCAmelCase = pool_size
_UpperCAmelCase = downsample_patch_size
_UpperCAmelCase = downsample_stride
_UpperCAmelCase = downsample_pad
_UpperCAmelCase = drop_path_rate
        _UpperCAmelCase = num_meta3d_blocks
_UpperCAmelCase = distillation
_UpperCAmelCase = use_layer_scale
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = image_size
_UpperCAmelCase = batch_norm_eps
| 573 | 1 |
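A hedged round-trip check for configs like the one above (assuming it is the stock EfficientFormerConfig): to_dict()/from_dict() should preserve every field set in __init__.

from transformers import EfficientFormerConfig

config = EfficientFormerConfig(num_meta3d_blocks=2)
restored = EfficientFormerConfig.from_dict(config.to_dict())
assert restored.num_meta3d_blocks == config.num_meta3d_blocks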
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self , a , a=7 , a=3 , a=18 , a=30 , a=400 , a=True , a=None , a=True , a=None , a=True , a=[0.5, 0.5, 0.5] , a=[0.5, 0.5, 0.5] , ) -> Dict:
SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 18}
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = crop_size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _snake_case ( A__ , unittest.TestCase ):
_lowercase : List[Any] = LevitImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = LevitImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(a , 'image_mean'))
self.assertTrue(hasattr(a , 'image_std'))
self.assertTrue(hasattr(a , 'do_normalize'))
self.assertTrue(hasattr(a , 'do_resize'))
self.assertTrue(hasattr(a , 'do_center_crop'))
self.assertTrue(hasattr(a , 'size'))
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 18})
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'shortest_edge': 42})
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a)
for image in image_inputs:
self.assertIsInstance(a , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a)
for image in image_inputs:
self.assertIsInstance(a , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a)
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 444 |
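The three input-type tests above (PIL, NumPy, PyTorch) differ only in how the images are materialized; a hedged refactoring sketch that would let one parametrized body cover all three (helper names here are hypothetical):

import numpy as np
import torch
from PIL import Image


def as_kind(arr: np.ndarray, kind: str):
    # materialize a (C, H, W) uint8 array as the requested container type
    if kind == "pil":
        return Image.fromarray(arr.transpose(1, 2, 0))
    if kind == "np":
        return arr
    return torch.from_numpy(arr)  # "pt"


for kind in ("pil", "np", "pt"):
    image = as_kind(np.zeros((3, 18, 18), dtype=np.uint8), kind)
    # ... run `image` through the processor and reuse the shared shape assertions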
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested')
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested')
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule')


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 444 | 1 |
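Once conftest registers these markers, tests opt in with the standard pytest decorator; a minimal (hypothetical) usage sketch:

import pytest


@pytest.mark.is_staging_test
def test_push_to_hub_behaviour():
    ...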
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    """Bifid cipher over a 5x5 Polybius square ("j" is folded into "i")."""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of `letter`."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
| 81 |
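A quick round-trip check for the cipher above; encode lowercases, strips spaces and folds "j" into "i", so pick a message without those:

cipher = BifidCipher()
secret = cipher.encode("testmessage")
assert cipher.decode(secret) == "testmessage"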
"""Evaluate a postfix (reverse Polish) expression, printing each stack step."""
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation

    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            # evaluate the 2 values popped from the stack & push the result
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
| 640 | 0 |
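A worked example (solve also prints its step-by-step stack table): in "5 6 9 * +", 6 * 9 = 54 first, then 5 + 54 = 59.

assert solve("5 6 9 * +".split()) == 59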
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _snake_case ( a__ ):
snake_case__ = ["image_processor", "tokenizer"]
snake_case__ = "BlipImageProcessor"
snake_case__ = "AutoTokenizer"
def __init__( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Tuple ):
super().__init__(UpperCAmelCase , UpperCAmelCase )
# add QFormer tokenizer
__lowerCamelCase : str = qformer_tokenizer
def __call__( self : Optional[int] , UpperCAmelCase : ImageInput = None , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : str , ):
if images is None and text is None:
raise ValueError("You have to specify at least images or text." )
__lowerCamelCase : Any = BatchFeature()
if text is not None:
__lowerCamelCase : Optional[Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
encoding.update(UpperCAmelCase )
__lowerCamelCase : List[str] = self.qformer_tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
__lowerCamelCase : Any = qformer_text_encoding.pop("input_ids" )
__lowerCamelCase : Dict = qformer_text_encoding.pop("attention_mask" )
if images is not None:
__lowerCamelCase : int = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase )
encoding.update(UpperCAmelCase )
return encoding
def lowerCamelCase__ ( self : str , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : str ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Tuple ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[Any] ):
if os.path.isfile(UpperCAmelCase ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
__lowerCamelCase : List[str] = os.path.join(UpperCAmelCase , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase )
return super().save_pretrained(UpperCAmelCase , **UpperCAmelCase )
@classmethod
def lowerCamelCase__ ( cls : Any , UpperCAmelCase : Any , **UpperCAmelCase : int ):
__lowerCamelCase : Dict = AutoTokenizer.from_pretrained(UpperCAmelCase , subfolder="qformer_tokenizer" )
__lowerCamelCase : Optional[Any] = cls._get_arguments_from_pretrained(UpperCAmelCase , **UpperCAmelCase )
args.append(UpperCAmelCase )
        return cls(*UpperCAmelCase )
| 706 |
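A hedged sketch of the save/load round trip the processor above implements, where the QFormer tokenizer travels in its own "qformer_tokenizer" subfolder (assumes the class is the stock InstructBlipProcessor and `processor` is an instance):

from transformers import InstructBlipProcessor

processor.save_pretrained("./instructblip-processor")  # also writes qformer_tokenizer/
reloaded = InstructBlipProcessor.from_pretrained("./instructblip-processor")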
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 366 | 0 |
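Worked checks for the function above: 13195 = 5 * 7 * 13 * 29, and the default input's largest prime factor is 6857.

assert solution(13195) == 29
assert solution() == 6857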
"""Depth-first topological sort of a small DAG."""

edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']


def topological_sort(start, visited, sort):
    """Visit `start` depth-first, appending each vertex in post-order."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
| 126 |
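For comparison, a minimal iterative Kahn's-algorithm sketch over the same `edges`/`vertices` globals; unlike the recursive DFS above it detects cycles for free:

from collections import deque


def kahn_topological_sort(vertices, edges):
    # count incoming edges per vertex
    indegree = {v: 0 for v in vertices}
    for outs in edges.values():
        for v in outs:
            indegree[v] += 1
    queue = deque(v for v in vertices if indegree[v] == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for w in edges[v]:
            indegree[w] -= 1
            if indegree[w] == 0:
                queue.append(w)
    if len(order) != len(vertices):
        raise ValueError("graph has a cycle")
    return order


print(kahn_topological_sort(vertices, edges))  # ['a', 'c', 'b', 'd', 'e']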
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def UpperCamelCase ( _lowerCAmelCase : int, _lowerCAmelCase : int ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCamelCase ( _lowerCAmelCase : int ) -> list[str]:
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Any = 11
_UpperCAmelCase : Tuple = int("""1""" + """0""" * digit_len )
for num in range(_lowerCAmelCase, _lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCAmelCase, _lowerCAmelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
_UpperCAmelCase : List[str] = 10
return solutions
def UpperCamelCase ( _lowerCAmelCase : int = 2 ) -> int:
_UpperCAmelCase : Dict = 1.0
for fraction in fraction_list(_lowerCAmelCase ):
_UpperCAmelCase : Tuple = Fraction(_lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCAmelCase )
if __name__ == "__main__":
print(solution())
| 238 | 0 |
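Worked check: the four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98, and their product reduces to 1/100:

assert sorted(fraction_list(2)) == sorted(["16/64", "19/95", "26/65", "49/98"])
assert solution() == 100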
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase ( __lowerCamelCase ):
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = SMALL_MODEL_IDENTIFIER
_SCREAMING_SNAKE_CASE = 'pt'
_SCREAMING_SNAKE_CASE = 'tf'
def lowercase ( self , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCAmelCase_ )
def lowercase ( self , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCAmelCase_ )
model_tf.save_pretrained(UpperCAmelCase_ )
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = 'mock_framework'
# Framework provided - return whatever the user provides
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(self.test_model , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(UpperCAmelCase_ )
def lowercase ( self ):
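        # TensorFlow not in environment -> use PyTorch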
_SCREAMING_SNAKE_CASE = MagicMock(return_value=UpperCAmelCase_ )
with patch("transformers.onnx.features.is_tf_available" , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_SCREAMING_SNAKE_CASE = MagicMock(return_value=UpperCAmelCase_ )
with patch("transformers.onnx.features.is_torch_available" , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
_SCREAMING_SNAKE_CASE = MagicMock(return_value=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = MagicMock(return_value=UpperCAmelCase_ )
with patch("transformers.onnx.features.is_tf_available" , UpperCAmelCase_ ), patch(
"transformers.onnx.features.is_torch_available" , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCAmelCase_ , self.framework_pt )
# Both not in environment -> raise error
_SCREAMING_SNAKE_CASE = MagicMock(return_value=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = MagicMock(return_value=UpperCAmelCase_ )
with patch("transformers.onnx.features.is_tf_available" , UpperCAmelCase_ ), patch(
"transformers.onnx.features.is_torch_available" , UpperCAmelCase_ ):
with self.assertRaises(UpperCAmelCase_ ):
                _SCREAMING_SNAKE_CASE = FeaturesManager.determine_framework(self.test_model )
| 706 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCAmelCase ( __UpperCAmelCase ):
a : Dict = DistilBertTokenizer
a : Tuple = DistilBertTokenizerFast
a : List[str] = True
@slow
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
        ]
| 493 | 0 |
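The assertions above encode the DistilBERT special-token layout: [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] for a pair. A small illustration (the token ids are hypothetical; loading the tokenizer requires a download):

from transformers import DistilBertTokenizer

tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
ids = tok.build_inputs_with_special_tokens([7592], [2088])
assert ids[0] == tok.cls_token_id and ids.count(tok.sep_token_id) == 2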
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 106 |
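A brief usage note: the _LazyModule installed above defers every heavy submodule import until attribute access, so importing the package itself stays cheap:

from transformers.models import yolos

config_cls = yolos.YolosConfig  # first attribute access triggers the real import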
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , __A , __A=7 , __A=3 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=1 / 255 , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , __A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = min_resolution
__UpperCAmelCase = max_resolution
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = do_rescale
__UpperCAmelCase = rescale_factor
__UpperCAmelCase = do_normalize
__UpperCAmelCase = image_mean
__UpperCAmelCase = image_std
__UpperCAmelCase = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , __A , __A=False ):
if not batched:
__UpperCAmelCase = image_inputs[0]
if isinstance(__A , Image.Image ):
__UpperCAmelCase , __UpperCAmelCase = image.size
else:
__UpperCAmelCase , __UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
__UpperCAmelCase = int(self.size['shortest_edge'] * h / w )
__UpperCAmelCase = self.size['shortest_edge']
elif w > h:
__UpperCAmelCase = self.size['shortest_edge']
__UpperCAmelCase = int(self.size['shortest_edge'] * w / h )
else:
__UpperCAmelCase = self.size['shortest_edge']
__UpperCAmelCase = self.size['shortest_edge']
else:
__UpperCAmelCase = []
for image in image_inputs:
__UpperCAmelCase , __UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCAmelCase = max(__A , key=lambda __A : item[0] )[0]
__UpperCAmelCase = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
_A : Any = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__UpperCAmelCase = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , 'image_mean' ) )
self.assertTrue(hasattr(__A , 'image_std' ) )
self.assertTrue(hasattr(__A , 'do_normalize' ) )
self.assertTrue(hasattr(__A , 'do_rescale' ) )
self.assertTrue(hasattr(__A , 'rescale_factor' ) )
self.assertTrue(hasattr(__A , 'do_resize' ) )
self.assertTrue(hasattr(__A , 'size' ) )
self.assertTrue(hasattr(__A , 'do_pad' ) )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad , __A )
__UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , __A )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A , batched=__A )
__UpperCAmelCase = image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase = image_processing(__A , return_tensors='pt' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
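# A back-of-the-envelope sketch (not the HF implementation) of the resize rule the
# tests above depend on: scale the short side to `shortest_edge` unless the long
# side would exceed `longest_edge`; integer truncation reproduces the 480x640
# fixture -> (800, 1066) expectation checked in both @slow tests.
def detr_resize_hw(height, width, shortest_edge=800, longest_edge=1333):
    short, long = min(height, width), max(height, width)
    if long * shortest_edge / short > longest_edge:
        shortest_edge = int(round(longest_edge * short / long))
    if height <= width:
        return shortest_edge, int(shortest_edge * width / height)
    return int(shortest_edge * height / width), shortest_edge


assert detr_resize_hw(480, 640) == (800, 1066)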
| 126 | 0 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
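# Illustration only (not diffusers' API): choosing one of the weight filenames
# defined above based on a backend string.
def weights_name_for(backend, use_safetensors=False):
    if backend == "flax":
        return FLAX_WEIGHTS_NAME
    if backend == "onnx":
        return ONNX_WEIGHTS_NAME
    return SAFETENSORS_WEIGHTS_NAME if use_safetensors else WEIGHTS_NAME


assert weights_name_for("pytorch", use_safetensors=True) == "diffusion_pytorch_model.safetensors"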
| 704 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_DESCRIPTION = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_KWARGS_DESCRIPTION = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each predictions\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: \"c\" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric('mauve')\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    """simple docstring"""

    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
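# Usage sketch adapted from the docstring above. Computing MAUVE downloads and
# runs gpt2-large for featurization, so this is slow; the value shown is only
# what the docstring promises for identical inputs.
if __name__ == "__main__":
    mauve = datasets.load_metric("mauve")
    out = mauve.compute(predictions=["hello there", "general kenobi"],
                        references=["hello there", "general kenobi"])
    print(out.mauve)  # ~1.0 for identical distributions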
| 324 | 0 |
"""simple docstring"""
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded)
if __name__ == "__main__":
import doctest
doctest.testmod() | 626 |
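# Round-trip self-check of the two functions above against the standard library.
import base64 as _stdlib_base64

_payload = b"Hello, base64!"
assert base64_encode(_payload) == _stdlib_base64.b64encode(_payload)
assert base64_decode(base64_encode(_payload)) == _payload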
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 626 | 1 |
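# Hypothetical command line for the script above (checkpoint, paths, and sizes
# are made up; the flags mirror the dataclass fields):
#
#   python finetune_trainer.py \
#     --model_name_or_path sshleifer/student_marian_en_ro_6_1 \
#     --data_dir ./wmt_en_ro \
#     --output_dir ./marian_out --overwrite_output_dir \
#     --do_train --do_eval --predict_with_generate \
#     --n_train 500 --n_val 200 --n_test 200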
def merge_sort(collection: list) -> list:
    # Repeatedly peel off the current min and max, then stitch the pieces together.
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
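# Property check for the min/max selection sort above (the name merge_sort is
# kept to match the __main__ block): it must agree with sorted(); note it
# mutates its argument, hence the copy.
import random

_sample = random.sample(range(100), 20)
assert merge_sort(_sample.copy()) == sorted(_sample)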
| 495 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
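# Hypothetical concrete command built on the ABC above; the "env" subcommand
# name and its behavior are invented for illustration.
class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        # `parser` is expected to be the sub-parsers action of the root ArgumentParser
        env_parser = parser.add_parser("env", help="print environment info")
        env_parser.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        print("environment info would go here")


_root = ArgumentParser("cli")
EnvCommand.register_subcommand(_root.add_subparsers())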
| 495 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
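# Usage sketch driving the class above with the module's test vectors; any
# truthy keyword (here describe=True) triggers the pretty-printing path in main.
BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)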
| 532 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 5_12,
"""google/realm-cc-news-pretrained-encoder""": 5_12,
"""google/realm-cc-news-pretrained-scorer""": 5_12,
"""google/realm-cc-news-pretrained-openqa""": 5_12,
"""google/realm-orqa-nq-openqa""": 5_12,
"""google/realm-orqa-nq-reader""": 5_12,
"""google/realm-orqa-wq-openqa""": 5_12,
"""google/realm-orqa-wq-reader""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        """simple docstring"""
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
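# Usage sketch for the candidate-batching entry point above (real REALM
# checkpoint, toy candidates; requires network access to download the files).
if __name__ == "__main__":
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
    batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
    print(batch["input_ids"].shape)  # (batch_size, num_candidates, max_length) == (2, 2, 10)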
| 477 | 0 |
class EditDistance:
    '''simple docstring'''

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []
    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]
    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
snake_case_ : Dict = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
snake_case_ : Any = input("Enter the first string: ").strip()
snake_case_ : Tuple = input("Enter the second string: ").strip()
print()
print(f"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}")
print(f"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 253 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
'''simple docstring'''
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
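# Smoke test for the adaptive softmax above; the sizes are arbitrary. With no
# labels, log_prob returns one normalized distribution over the vocab per row.
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=16, cutoffs=[100, 500], div_val=1)
    for p in crit.parameters():
        nn.init.normal_(p, std=0.02)  # torch.FloatTensor above is uninitialized
    hidden = torch.randn(4, 16)  # inputs live in the projection space (d_proj)
    logp = crit.log_prob(hidden)
    print(logp.shape)          # torch.Size([4, 1000])
    print(logp.exp().sum(-1))  # ~1.0 per row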
| 253 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case : Dict = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 124 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    '''simple docstring'''
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
| 124 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
A =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
A =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
A =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A =pipe.to("cuda" )
A ="Spiderman is surfing"
A =torch.Generator(device="cpu" ).manual_seed(0 )
A =pipe(snake_case__ , generator=snake_case__ , num_inference_steps=25 , output_type="pt" ).frames
A =video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
A =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
A =pipe.to("cuda" )
A ="Spiderman is surfing"
A =torch.Generator(device="cpu" ).manual_seed(0 )
A =pipe(snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="pt" ).frames
A =video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
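# Illustrative sketch (added for clarity, not part of the original file): the
# slow test above with readable names. Note the dataset's renaming dropped the
# `pipe.scheduler = ...` assignment that the real test performs. Requires a
# CUDA GPU and downloads the real checkpoint.
import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(
    "Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt"
).frames
print(type(video_frames))  # decoded frames; exact shape depends on the pipeline version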
| 689 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def UpperCamelCase_ ( a_ ) ->List[str]:
A =set()
A =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A =char
A =set(a_ )
return pairs
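# For orientation, a readable equivalent of the pair-extraction helper above
# (names chosen for illustration; the original def and its locals were mangled
# by the dataset's renaming):
def get_symbol_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

assert get_symbol_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}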
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(unk_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A =json.load(snake_case__ )
A ={v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A =merges_handle.read().split("\n" )[1:-1]
A =[tuple(merge.split() ) for merge in merges]
A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A =tuple(snake_case__ )
A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A =get_pairs(snake_case__ )
if not pairs:
return token
while True:
A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A , A =bigram
A =[]
A =0
while i < len(snake_case__ ):
try:
A =word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A =j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A =tuple(snake_case__ )
A =new_word
if len(snake_case__ ) == 1:
break
else:
A =get_pairs(snake_case__ )
A ="@@ ".join(snake_case__ )
A =word[:-4]
A =word
return word
def _a ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
A =[]
A =re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
A =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A =0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A =token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( __lowerCamelCase ):
snake_case_ = ["""image_processor""", """tokenizer"""]
snake_case_ = """BlipImageProcessor"""
snake_case_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Union[str, Any] ,A : List[Any] ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = False
super().__init__(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase__ : Tuple = self.image_processor
def __call__( self : Union[str, Any] ,A : str = None ,A : List[Any] = None ,A : Optional[Any] = True ,A : Optional[int] = False ,A : Dict = None ,A : int = None ,A : List[Any] = 0 ,A : Optional[int] = None ,A : Optional[int] = None ,A : int = False ,A : Any = False ,A : str = False ,A : List[str] = False ,A : Union[str, Any] = False ,A : str = True ,A : Union[str, Any] = None ,**A : Tuple ,):
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
UpperCAmelCase__ : List[Any] = self.tokenizer
UpperCAmelCase__ : Any = self.tokenizer(
text=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=lowerCamelCase_ ,stride=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_overflowing_tokens=lowerCamelCase_ ,return_special_tokens_mask=lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,return_length=lowerCamelCase_ ,verbose=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ ,)
return text_encoding
# add pixel_values
UpperCAmelCase__ : Optional[int] = self.image_processor(lowerCamelCase_ ,return_tensors=lowerCamelCase_ )
if text is not None:
UpperCAmelCase__ : Any = self.tokenizer(
text=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=lowerCamelCase_ ,stride=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,return_overflowing_tokens=lowerCamelCase_ ,return_special_tokens_mask=lowerCamelCase_ ,return_offsets_mapping=lowerCamelCase_ ,return_token_type_ids=lowerCamelCase_ ,return_length=lowerCamelCase_ ,verbose=lowerCamelCase_ ,return_tensors=lowerCamelCase_ ,**lowerCamelCase_ ,)
else:
UpperCAmelCase__ : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase_ )
return encoding_image_processor
def __lowercase ( self : List[Any] ,*A : List[Any] ,**A : int ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_ ,**lowerCamelCase_ )
def __lowercase ( self : Any ,*A : int ,**A : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_ ,**lowerCamelCase_ )
@property
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.tokenizer.model_input_names
UpperCAmelCase__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
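# Hedged usage sketch (added for clarity): how a processor composed of a BLIP
# image processor and a BERT tokenizer is typically called. The checkpoint
# name is illustrative, not taken from this file.
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.new("RGB", (384, 384))  # dummy stand-in for a real image
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # pixel_values plus the tokenizer outputs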
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase ={
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase =[
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
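# Sketch of the idea behind `_LazyModule` (a simplified stand-in, not the
# transformers implementation): attribute access triggers the real import only
# on first use, keeping import-time startup cheap.
import importlib

class LazyNamespace:
    def __init__(self, import_map):
        self._import_map = import_map  # attribute name -> module path

    def __getattr__(self, name):
        module = importlib.import_module(self._import_map[name])
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value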
| 617 | 0 |
'''simple docstring'''
def lowercase__ ( _UpperCamelCase) -> list[int]:
"""simple docstring"""
UpperCamelCase = len(_UpperCamelCase)
for i in range(_UpperCamelCase):
for j in range(i + 1 , _UpperCamelCase):
if numbers[j] < numbers[i]:
UpperCamelCase , UpperCamelCase = numbers[j], numbers[i]
return numbers
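# Readable equivalent (added for clarity): the dataset's renaming turned the
# def above into `lowercase__` while the call below still expects
# `exchange_sort`, and it also mangled the in-place tuple swap.
def exchange_sort(numbers):
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers

assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]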
if __name__ == "__main__":
__magic_name__ : str = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
| 709 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__magic_name__ : Union[str, Any] = get_logger()
__magic_name__ : Optional[dict] = None
class A__ ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
'''simple docstring'''
def __init__( self : Dict , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , **_SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
super().__init__(features=_SCREAMING_SNAKE_CASE )
import jax
from jaxlib.xla_client import Device
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
f'Expected {device} to be a `str` not {type(_SCREAMING_SNAKE_CASE )}, as `jaxlib.xla_extension.Device` '
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
UpperCamelCase = device if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'Device with string identifier {self.device} not listed among the available '
f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
f'device: {str(jax.devices()[0] )}.' )
UpperCamelCase = str(jax.devices()[0] )
UpperCamelCase = jnp_array_kwargs
@staticmethod
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
import jax
return {str(_SCREAMING_SNAKE_CASE ): device for device in jax.devices()}
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and column:
if all(
isinstance(_SCREAMING_SNAKE_CASE , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_SCREAMING_SNAKE_CASE , axis=0 )
return column
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_SCREAMING_SNAKE_CASE , (str, bytes, type(None )) ):
return value
elif isinstance(_SCREAMING_SNAKE_CASE , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
UpperCamelCase = {}
if isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
UpperCamelCase = {'dtype': jnp.int64}
else:
UpperCamelCase = {'dtype': jnp.int32}
elif isinstance(_SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
UpperCamelCase = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
UpperCamelCase = np.asarray(_SCREAMING_SNAKE_CASE )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_SCREAMING_SNAKE_CASE , '__array__' ) and not isinstance(_SCREAMING_SNAKE_CASE , jax.Array ):
UpperCamelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , _SCREAMING_SNAKE_CASE : dict ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _SCREAMING_SNAKE_CASE , map_list=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Dict , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_row(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_row(_SCREAMING_SNAKE_CASE )
return self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_column(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_column(_SCREAMING_SNAKE_CASE , pa_table.column_names[0] )
UpperCamelCase = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self._consolidate(_SCREAMING_SNAKE_CASE )
return column
def _SCREAMING_SNAKE_CASE ( self : List[str] , _SCREAMING_SNAKE_CASE : pa.Table ):
"""simple docstring"""
UpperCamelCase = self.numpy_arrow_extractor().extract_batch(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.python_features_decoder.decode_batch(_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.recursive_tensorize(_SCREAMING_SNAKE_CASE )
for column_name in batch:
UpperCamelCase = self._consolidate(batch[column_name] )
return batch
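# Sketch of the dtype rule the formatter applies to integer numpy inputs (see
# the comment in the tensorize method above): 64-bit integers only when JAX's
# x64 mode is enabled, otherwise 32-bit.
import jax
import jax.numpy as jnp
import numpy as np

def default_int_dtype():
    return jnp.int64 if jax.config.jax_enable_x64 else jnp.int32

arr = jnp.array(np.arange(4), dtype=default_int_dtype())
print(arr.dtype)  # int32 unless JAX_ENABLE_X64=1 is set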
| 410 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : Any , _lowercase : Tuple , _lowercase : int=3 , _lowercase : int=32 , _lowercase : Any=3 , _lowercase : str=10 , _lowercase : int=[10, 20, 30, 40] , _lowercase : List[str]=[1, 1, 2, 1] , _lowercase : Tuple=True , _lowercase : List[str]=True , _lowercase : int="relu" , _lowercase : List[str]=3 , _lowercase : str=None , ):
A = parent
A = batch_size
A = image_size
A = num_channels
A = embeddings_size
A = hidden_sizes
A = depths
A = is_training
A = use_labels
A = hidden_act
A = num_labels
A = scope
A = len(_lowercase )
def __a ( self : Tuple ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = self.get_config()
return config, pixel_values
def __a ( self : List[Any] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __a ( self : int , _lowercase : Any , _lowercase : int ):
A = FlaxRegNetModel(config=_lowercase )
A = model(_lowercase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Dict ):
A = self.num_labels
A = FlaxRegNetForImageClassification(config=_lowercase )
A = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Tuple ):
A = self.prepare_config_and_inputs()
A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def __a ( self : List[Any] ):
A = FlaxRegNetModelTester(self )
A = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def __a ( self : List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : str ):
return
def __a ( self : List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __a ( self : Any ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __a ( self : str ):
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __a ( self : Any ):
pass
def __a ( self : List[Any] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowercase )
A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def __a ( self : List[Any] ):
def check_hidden_states_output(_lowercase : str , _lowercase : Dict , _lowercase : Optional[Any] ):
A = model_class(_lowercase )
A = model(**self._prepare_for_class(_lowercase , _lowercase ) )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def __a ( self : int ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A = self._prepare_for_class(_lowercase , _lowercase )
A = model_class(_lowercase )
@jax.jit
def model_jitted(_lowercase : Optional[Any] , **_lowercase : Any ):
return model(pixel_values=_lowercase , **_lowercase )
with self.subTest('JIT Enabled' ):
A = model_jitted(**_lowercase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A = model_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __snake_case ( ) -> Any:
"""simple docstring"""
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : int ):
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def __a ( self : Tuple ):
A = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=_lowercase , return_tensors='np' )
A = model(**_lowercase )
# verify the logits
A = (1, 1_000)
self.assertEqual(outputs.logits.shape , _lowercase )
A = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
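# Minimal jax.jit sanity sketch (added for clarity), mirroring the
# JIT-enabled vs JIT-disabled comparison in the test class above.
import jax
import jax.numpy as jnp

@jax.jit
def affine(x):
    return 2.0 * x + 1.0

x = jnp.arange(4.0)
with jax.disable_jit():
    eager = affine(x)
assert jnp.allclose(affine(x), eager)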
| 690 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __snake_case ( UpperCamelCase__ ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
A = {'+', '-', '*', '/'}
A = []
for token in postfix_notation:
if token in operations:
A , A = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCamelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
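# Readable equivalent of the postfix evaluator above (the original's locals
# were mangled; the pop order matters: the right-hand operand comes off the
# stack first).
def evaluate_postfix(tokens):
    stack = []
    for token in tokens:
        if token in {"+", "-", "*", "/"}:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            elif a * b < 0 and a % b != 0:
                stack.append(a // b + 1)  # floor division adjusted toward zero
            else:
                stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()

assert evaluate_postfix(["2", "3", "+", "4", "*"]) == 20
assert evaluate_postfix(["15", "7", "1", "1", "+", "-", "/"]) == 3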
| 690 | 1 |
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCamelCase__ = 300 # TEMPERATURE (unit = K)
def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , ):
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
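# Self-contained equivalent (added for clarity): the function above computes
# the built-in potential V_bi = (kT/q) * ln(Nd * Na / ni**2); its `T` refers
# to the module constant (300 K) that the renaming hid. Concentrations below
# are illustrative values for silicon at 300 K.
from math import log
from scipy.constants import Boltzmann, physical_constants

def builtin_voltage(donor_conc, acceptor_conc, intrinsic_conc, temperature=300.0):
    kt_in_ev = Boltzmann * temperature / physical_constants["electron volt"][0]
    return kt_in_ev * log(donor_conc * acceptor_conc / intrinsic_conc**2)

print(f"{builtin_voltage(1e17, 1e17, 1.5e10):.2f} V")  # ~0.81 V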
| 552 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class a__ ( snake_case__ ):
_a : Optional[Any] = """M-CLIP"""
def __init__( self , _A=1_0_2_4 , _A=7_6_8 , **_A ):
"""simple docstring"""
__lowerCAmelCase = transformerDimSize
__lowerCAmelCase = imageDimSize
super().__init__(**_A )
class a__ ( snake_case__ ):
_a : Tuple = MCLIPConfig
def __init__( self , _A , *_A , **_A ):
"""simple docstring"""
super().__init__(_A , *_A , **_A )
__lowerCAmelCase = XLMRobertaModel(_A )
__lowerCAmelCase = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.transformer(input_ids=_A , attention_mask=_A )[0]
__lowerCAmelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(_A ), embs
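# Sketch of the masked mean pooling the forward pass above performs: zero out
# padding positions, sum over the sequence, divide by the number of real
# tokens (shapes chosen for illustration).
import torch

embs = torch.randn(2, 5, 8)  # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 8])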
| 552 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 625 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
Bzip2Extractor,
Extractor,
GzipExtractor,
Lz4Extractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str ={
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bz2_file, Bzip2Extractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lz4_file, Lz4Extractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] =input_paths_and_base_extractors[compression_format]
if input_path is None:
SCREAMING_SNAKE_CASE : Union[str, Any] =f'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__a )
assert base_extractor.is_extractable(__a )
SCREAMING_SNAKE_CASE : Tuple =tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(__a , __a )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
SCREAMING_SNAKE_CASE : List[str] =file_path.read_text(encoding='''utf-8''' )
else:
SCREAMING_SNAKE_CASE : Dict =output_path.read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE : Optional[int] =text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] ={
'''7z''': seven_zip_file,
'''bz2''': bz2_file,
'''gzip''': gz_file,
'''lz4''': lz4_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
SCREAMING_SNAKE_CASE : Any =input_paths[compression_format]
if input_path is None:
SCREAMING_SNAKE_CASE : int =f'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__a )
SCREAMING_SNAKE_CASE : int =Extractor.infer_extractor_format(__a )
assert extractor_format is not None
SCREAMING_SNAKE_CASE : List[str] =tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(__a , __a , __a )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
SCREAMING_SNAKE_CASE : Any =file_path.read_text(encoding='''utf-8''' )
else:
SCREAMING_SNAKE_CASE : List[str] =output_path.read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE : List[str] =text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def lowerCAmelCase_ ( __a , __a ) -> Tuple:
"""simple docstring"""
import tarfile
SCREAMING_SNAKE_CASE : Any =tmp_path / '''data_dot_dot'''
directory.mkdir()
SCREAMING_SNAKE_CASE : Tuple =directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(__a , '''w''' ) as f:
f.add(__a , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
import tarfile
SCREAMING_SNAKE_CASE : List[str] =tmp_path / '''data_sym_link'''
directory.mkdir()
SCREAMING_SNAKE_CASE : Tuple =directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=True )
with tarfile.TarFile(__a , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int ={
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
SCREAMING_SNAKE_CASE : Any =insecure_tar_files[insecure_tar_file]
SCREAMING_SNAKE_CASE : Optional[Any] =tmp_path / '''extracted'''
TarExtractor.extract(__a , __a )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] =tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
SCREAMING_SNAKE_CASE : List[Any] =(
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(__a )
assert zipfile.is_zipfile(str(__a ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__a ) # but we're right
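# Hedged sketch of the flow the tests above exercise: infer the archive format
# (from the file's magic bytes, so the path must exist), then extract. The
# paths below are placeholders.
from datasets.utils.extract import Extractor

archive_format = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
if archive_format is not None:
    Extractor.extract("archive.tar.gz", "extracted_dir", archive_format)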
| 258 | 0 |
def __UpperCamelCase ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowerCAmelCase__ , n - 1 , lowerCAmelCase__ ) * a) % mod
else:
__a : str = binary_exponentiation(lowerCAmelCase__ , n // 2 , lowerCAmelCase__ )
return (b * b) % mod
# a prime number
lowercase__ =701
lowercase__ =1000000000
lowercase__ =10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
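# The prints above verify Fermat's little theorem: for prime p and
# gcd(b, p) = 1, b**(p-2) is the modular inverse of b. A direct check using a
# readable copy of the function:
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    if n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    half = binary_exponentiation(a, n // 2, mod)
    return (half * half) % mod

inverse = binary_exponentiation(10, 701 - 2, 701)
assert (10 * inverse) % 701 == 1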
| 326 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __UpperCamelCase ( lowerCAmelCase__ : int ):
random.seed(lowerCAmelCase__ )
np.random.seed(lowerCAmelCase__ )
torch.manual_seed(lowerCAmelCase__ )
torch.cuda.manual_seed_all(lowerCAmelCase__ )
# ^^ safe to call this function even if cuda is not available
class UpperCamelCase__ :
def __init__(self : Any , snake_case_ : Iterable[torch.nn.Parameter] , snake_case_ : float = 0.9999 , snake_case_ : float = 0.0 , snake_case_ : int = 0 , snake_case_ : bool = False , snake_case_ : Union[float, int] = 1.0 , snake_case_ : Union[float, int] = 2 / 3 , snake_case_ : Optional[Any] = None , snake_case_ : Dict[str, Any] = None , **snake_case_ : int , ):
if isinstance(snake_case_ , torch.nn.Module ):
__a : Optional[Any] = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , snake_case_ , standard_warn=snake_case_ , )
__a : Optional[int] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__a : str = True
if kwargs.get('''max_value''' , snake_case_ ) is not None:
__a : List[Any] = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate('''max_value''' , '''1.0.0''' , snake_case_ , standard_warn=snake_case_ )
__a : Optional[Any] = kwargs['''max_value''']
if kwargs.get('''min_value''' , snake_case_ ) is not None:
__a : Optional[int] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate('''min_value''' , '''1.0.0''' , snake_case_ , standard_warn=snake_case_ )
__a : int = kwargs['''min_value''']
__a : Any = list(snake_case_ )
__a : Optional[int] = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' , snake_case_ ) is not None:
__a : Optional[Any] = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate('''device''' , '''1.0.0''' , snake_case_ , standard_warn=snake_case_ )
self.to(device=kwargs['''device'''] )
__a : List[str] = None
__a : Tuple = decay
__a : str = min_decay
__a : Any = update_after_step
__a : List[str] = use_ema_warmup
__a : Any = inv_gamma
__a : Any = power
__a : Union[str, Any] = 0
__a : Dict = None # set in `step()`
__a : Any = model_cls
__a : Any = model_config
@classmethod
def lowerCAmelCase (cls : List[str] , snake_case_ : Dict , snake_case_ : Dict ):
__a , __a : Optional[int] = model_cls.load_config(snake_case_ , return_unused_kwargs=snake_case_ )
__a : Dict = model_cls.from_pretrained(snake_case_ )
__a : List[Any] = cls(model.parameters() , model_cls=snake_case_ , model_config=model.config )
ema_model.load_state_dict(snake_case_ )
return ema_model
def lowerCAmelCase (self : Optional[Any] , snake_case_ : Dict ):
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
__a : int = self.model_cls.from_config(self.model_config )
__a : List[Any] = self.state_dict()
state_dict.pop('''shadow_params''' , snake_case_ )
model.register_to_config(**snake_case_ )
self.copy_to(model.parameters() )
model.save_pretrained(snake_case_ )
def lowerCAmelCase (self : Optional[int] , snake_case_ : int ):
__a : Tuple = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
__a : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__a : List[str] = (1 + step) / (1_0 + step)
__a : Dict = min(snake_case_ , self.decay )
# make sure decay is not smaller than min_decay
__a : int = max(snake_case_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase (self : Optional[int] , snake_case_ : Iterable[torch.nn.Parameter] ):
if isinstance(snake_case_ , torch.nn.Module ):
__a : List[Any] = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case_ , standard_warn=snake_case_ , )
__a : Union[str, Any] = parameters.parameters()
__a : Optional[Any] = list(snake_case_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__a : str = self.get_decay(self.optimization_step )
__a : List[str] = decay
__a : Dict = 1 - decay
__a : Optional[int] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , snake_case_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
__a : Dict = deepspeed.zero.GatheredParameters(snake_case_ , modifier_rank=snake_case_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(snake_case_ )
def lowerCAmelCase (self : int , snake_case_ : Iterable[torch.nn.Parameter] ):
__a : str = list(snake_case_ )
for s_param, param in zip(self.shadow_params , snake_case_ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase (self : int , snake_case_ : int=None , snake_case_ : int=None ):
__a : str = [
p.to(device=snake_case_ , dtype=snake_case_ ) if p.is_floating_point() else p.to(device=snake_case_ )
for p in self.shadow_params
]
def lowerCAmelCase (self : Dict ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase (self : Tuple , snake_case_ : Iterable[torch.nn.Parameter] ):
__a : str = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase (self : Optional[int] , snake_case_ : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params , snake_case_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
__a : Optional[Any] = None
def lowerCAmelCase (self : Optional[int] , snake_case_ : dict ):
__a : Dict = copy.deepcopy(snake_case_ )
__a : int = state_dict.get('''decay''' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
__a : List[str] = state_dict.get('''min_decay''' , self.min_decay )
if not isinstance(self.min_decay , snake_case_ ):
raise ValueError('''Invalid min_decay''' )
__a : Dict = state_dict.get('''optimization_step''' , self.optimization_step )
if not isinstance(self.optimization_step , snake_case_ ):
raise ValueError('''Invalid optimization_step''' )
__a : Optional[int] = state_dict.get('''update_after_step''' , self.update_after_step )
if not isinstance(self.update_after_step , snake_case_ ):
raise ValueError('''Invalid update_after_step''' )
__a : Any = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , snake_case_ ):
raise ValueError('''Invalid use_ema_warmup''' )
__a : Any = state_dict.get('''inv_gamma''' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('''Invalid inv_gamma''' )
__a : Tuple = state_dict.get('''power''' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('''Invalid power''' )
__a : Dict = state_dict.get('''shadow_params''' , snake_case_ )
if shadow_params is not None:
__a : Tuple = shadow_params
if not isinstance(self.shadow_params , snake_case_ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(snake_case_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
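# Hedged usage sketch: this class is diffusers' `EMAModel` under the dataset's
# renaming; the constructor and method names below follow the real library.
import torch
from diffusers.training_utils import EMAModel

model = torch.nn.Linear(4, 2)
ema = EMAModel(model.parameters(), decay=0.999)

for _ in range(10):  # stand-in for a training loop
    for p in model.parameters():
        p.data.add_(torch.randn_like(p), alpha=0.01)
    ema.step(model.parameters())

ema.store(model.parameters())    # stash current weights
ema.copy_to(model.parameters())  # evaluate with the averaged weights
ema.restore(model.parameters())  # return to the training weights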
| 326 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : List[Any] ,lowercase__ : Union[str, Any] ,):
__lowercase = parent
__lowercase = 1_3
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = 9_9
__lowercase = 3_2
__lowercase = 2
__lowercase = 4
__lowercase = 3_7
__lowercase = '''gelu'''
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 5_1_2
__lowercase = 1_6
__lowercase = 2
__lowercase = 0.0_2
__lowercase = 3
__lowercase = 4
__lowercase = None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : str ):
__lowercase = TFDistilBertModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : str ,lowercase__ : str ,lowercase__ : Union[str, Any] ):
__lowercase = TFDistilBertForMaskedLM(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = TFDistilBertForQuestionAnswering(config=lowercase__ )
__lowercase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ):
__lowercase = self.num_labels
__lowercase = TFDistilBertForSequenceClassification(lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Optional[int] ):
__lowercase = self.num_choices
__lowercase = TFDistilBertForMultipleChoice(lowercase__ )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = TFDistilBertForTokenClassification(lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.prepare_config_and_inputs()
((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = TFDistilBertModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,dim=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__lowercase = TFDistilBertModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@require_tf
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(lowercase__ )[0]
__lowercase = [1, 6, 7_6_8]
self.assertEqual(output.shape ,lowercase__ )
__lowercase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] ,lowercase__ ,atol=1e-4 )
| 41 |
"""simple docstring"""
from math import ceil, sqrt
def UpperCAmelCase_ ( __a : int = 1_00_00_00 ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
_lowerCamelCase : Optional[int] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
_lowerCamelCase : Any = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(F"{solution() = }")
| 437 | 0 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase :
def __init__( self :Tuple ):
'''simple docstring'''
lowercase__ = ""
lowercase__ = ""
lowercase__ = []
lowercase__ = 0
lowercase__ = 2_56
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
def UpperCAmelCase ( self :Optional[Any] , _lowercase :int ):
'''simple docstring'''
lowercase__ = cv2.imread(_lowercase , 0 )
lowercase__ = copy.deepcopy(self.img )
lowercase__ = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label="x" )
lowercase__ = np.sum(lowercase_ )
for i in range(len(lowercase_ ) ):
lowercase__ = x[i] / self.k
self.sk += prk
lowercase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowercase__ = int(last % last )
lowercase__ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowercase_ )
lowercase__ = int(np.ma.count(self.img ) / self.img[1].size )
lowercase__ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowercase__ = self.img[j][i]
if num != self.last_list[num]:
lowercase__ = self.last_list[num]
cv2.imwrite("output_data/output.jpg" , self.img )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
_snake_case = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
_snake_case = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
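# Vectorized sketch of the mapping the class applies (equivalent only up to
# rounding convention: the class rounds halves up, np.rint rounds to even; the
# class body above is also partly garbled by the renaming).
import numpy as np

def equalize(img, levels=256):
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = hist.cumsum() / img.size
    lut = np.rint((levels - 1) * cdf).astype(np.uint8)
    return lut[img]

demo = np.random.randint(0, 256, (4, 4), dtype=np.uint8)
print(equalize(demo))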
| 704 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False,
        patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5,
        is_training=True, scope=None, use_labels=True, type_sequence_label_size=10,
        encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads,
            window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
pass
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowercase :Optional[Any] ):
lowercase__ = 0
return t
def check_equivalence(_lowercase :Optional[int] , _lowercase :List[str] , _lowercase :Optional[Any] , _lowercase :str={} ):
with torch.no_grad():
lowercase__ = model(**_lowercase , return_dict=_lowercase , **_lowercase )
lowercase__ = model(**_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple()
def recursive_check(_lowercase :int , _lowercase :Dict ):
if isinstance(_lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ):
recursive_check(_lowercase , _lowercase )
elif isinstance(_lowercase , _lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowercase , _lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowercase ) , set_nan_tensor_to_zero(_lowercase ) , atol=1e-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}. Dict has'''
f''' `nan`: {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}.'''
) , )
recursive_check(_lowercase , _lowercase )
for model_class in self.all_model_classes:
lowercase__ = model_class(_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = self._prepare_for_class(_lowercase , _lowercase )
lowercase__ = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
lowercase__ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
lowercase__ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
lowercase__ = self._prepare_for_class(_lowercase , _lowercase )
lowercase__ = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {"output_hidden_states": True} )
lowercase__ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
lowercase__ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {"output_hidden_states": True} )


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max,
            sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max,
            sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size, hop_length=self.hop_length,
            power=2.0, mel_filters=mel_filters, log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
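    # Note: the 4-channel "fusion" layout above (one bilinearly shrunk global mel
    # plus one chunk sampled from each of the front, middle and back thirds) is how
    # this extractor keeps global context for audio longer than the maximum window;
    # the channel order matches the np.stack call above.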
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
from __future__ import annotations
import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the best reachable score for the player to move in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
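# Worked example for main() above, computed by hand: the leaves
# [90, 23, 6, 33, 21, 65, 123, 34423] with height log2(8) = 3 reduce as
# max -> [90, 33, 65, 34423], then min -> [33, 65], then max -> 65,
# so the script prints "Optimal value : 65".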
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]

    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
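# Non-interactive usage sketch (sample values are illustrative only):
#
#     data = [5, 3, 8, 1, 9, 2]
#     quick_sort_random(data, 0, len(data))  # note: the `right` bound is exclusive
#     # data is now [1, 2, 3, 5, 8, 9]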
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True,
            only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2, seq_length=56, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=2, intermediate_size=7,
        hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_choices=4, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
            attention_type=self.attention_type, block_size=self.block_size,
            num_random_blocks=self.num_random_blocks, use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(pool, logits_list)

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
def a ( self : Optional[Any] ) -> List[str]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Dict ) -> Dict:
__snake_case = snapshot_download('hf-internal-testing/processor_with_lm' )
__snake_case = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = processor.decoder.model_container[processor.decoder._model_key]
__snake_case = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
__snake_case = os.listdir(SCREAMING_SNAKE_CASE_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def a ( self : Any ) -> List[Any]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = floats_list((3, 1000) )
__snake_case = processor_wavaveca(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor_auto(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__snake_case = self._get_dummy_logits()
__snake_case = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = processor_auto.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a ( self : Dict ) -> Optional[int]:
__snake_case = self.get_feature_extractor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_decoder()
__snake_case = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def get_from_offsets(offsets, key) -> list:
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def a ( self : Optional[int] ) -> str:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()[0]
__snake_case = processor.decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def a ( self : Optional[Any] ) -> Optional[int]:
__snake_case = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__snake_case = self._get_dummy_logits()
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ , output_word_offsets=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a ( self : Optional[Any] ) -> Optional[Any]:
import torch
__snake_case = load_dataset('common_voice' , 'en' , split='train' , streaming=SCREAMING_SNAKE_CASE_ )
__snake_case = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000 ) )
__snake_case = iter(SCREAMING_SNAKE_CASE_ )
__snake_case = next(SCREAMING_SNAKE_CASE_ )
__snake_case = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__snake_case = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__snake_case = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__snake_case = model(SCREAMING_SNAKE_CASE_ ).logits.cpu().numpy()
__snake_case = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE_ )
__snake_case = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
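# Rough arithmetic (an assumption about this checkpoint): wav2vec2-base
# downsamples audio by a factor of 320 (inputs_to_logits_ratio), so at a
# 16 kHz sampling rate each logit frame spans 320 / 16000 = 0.02 s.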
__snake_case = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__snake_case = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(' '.join(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'word' ) ) , output.text )
# output times
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'start_time' ) )
__snake_case = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE_ , 'end_time' ) )
# fmt: off
__snake_case = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
__snake_case = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=0.0_1 ) )
| 56 | 1 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` decimal places via the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("""Undefined for non-integers""")
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
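# The loop above sums the Chudnovsky series
#   1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
#                          / ((3k)! (k!)^3 * 640320^(3k + 3/2))
# where 426880 * sqrt(10005) = 640320^(3/2) / 12 supplies the constant term.
# Each term adds roughly 14 decimal digits, hence `ceil(precision / 14)` iterations.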
if __name__ == "__main__":
n = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 710 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_snake_case = StableDiffusionXLImgaImgPipeline
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
_snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''}
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
_snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=snake_case_ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCAmelCase = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
__lowerCAmelCase = CLIPTextModel(snake_case_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=snake_case_ )
__lowerCAmelCase = CLIPTextModelWithProjection(snake_case_ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=snake_case_ )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def A__ ( self , snake_case_ , snake_case_=0 ) -> List[str]:
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
__lowerCAmelCase = image / 2 + 0.5
if str(snake_case_ ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(snake_case_ )
else:
__lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def A__ ( self ) -> Tuple:
__lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**snake_case_ )
__lowerCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
__lowerCAmelCase = sd_pipe(**snake_case_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ) -> int:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def A__ ( self ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A__ ( self ) -> int:
pass
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**snake_case_ )
__lowerCAmelCase = sd_pipe.to(snake_case_ )
__lowerCAmelCase = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
# forward without prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
__lowerCAmelCase = 3 * ["""this is a negative prompt"""]
__lowerCAmelCase = negative_prompt
__lowerCAmelCase = 3 * [inputs["""prompt"""]]
__lowerCAmelCase = sd_pipe(**snake_case_ )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(snake_case_ )
__lowerCAmelCase = 3 * ["""this is a negative prompt"""]
__lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = sd_pipe.encode_prompt(snake_case_ , negative_prompt=snake_case_ )
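# encode_prompt returns (prompt_embeds, negative_prompt_embeds,
# pooled_prompt_embeds, negative_pooled_prompt_embeds), matching the keyword
# arguments passed back into the pipeline call below.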
__lowerCAmelCase = sd_pipe(
**snake_case_ , prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , pooled_prompt_embeds=snake_case_ , negative_pooled_prompt_embeds=snake_case_ , )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self , snake_case_ , snake_case_="cpu" , snake_case_=torch.floataa , snake_case_=0 ) -> Optional[int]:
__lowerCAmelCase = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__lowerCAmelCase = np.random.RandomState(snake_case_ ).standard_normal((1, 4, 64, 64) )
__lowerCAmelCase = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
__lowerCAmelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> Any:
__lowerCAmelCase = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = self.get_inputs(snake_case_ )
__lowerCAmelCase = pipe(**snake_case_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowerCAmelCase = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 573 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@slow
def lowercase ( self : Tuple ):
_UpperCAmelCase = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
_UpperCAmelCase = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )['last_hidden_state']
_UpperCAmelCase = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# compare the actual values for a slice.
_UpperCAmelCase = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 236 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's account info from the GitHub REST API."""
    headers = {
        'Authorization': f"token {auth_token}",
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
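# Minimal usage sketch (the token below is a placeholder, not a real secret;
# the /user payload includes account fields such as 'login' and 'id'):
#
#   info = fetch_github_info('ghp_exampletoken')
#   print(info.get('login'))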
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 511 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i, k, i, n):
"""simple docstring"""
lowercase__ : Any = sum(a_i[j] for j in range(lowerCamelCase__ , len(lowerCamelCase__ ) ) )
lowercase__ : Dict = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase__ ) , lowerCamelCase__ ) ) )
lowercase__ , lowercase__ : Optional[Any] = 0, 0
lowercase__ : Any = n - i
lowercase__ : List[str] = memo.get(lowerCamelCase__ )
if sub_memo is not None:
lowercase__ : Any = sub_memo.get(lowerCamelCase__ )
if jumps is not None and len(lowerCamelCase__ ) > 0:
# find and make the largest jump without going over
lowercase__ : Union[str, Any] = -1
for _k in range(len(lowerCamelCase__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowercase__ : List[str] = _k
break
if max_jump >= 0:
lowercase__ , lowercase__ , lowercase__ : Dict = jumps[max_jump]
# since the difference between jumps is cached, add c
lowercase__ : List[Any] = diff + c
for j in range(min(lowerCamelCase__ , len(lowerCamelCase__ ) ) ):
lowercase__ , lowercase__ : List[str] = divmod(lowerCamelCase__ , 10 )
if new_c > 0:
add(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
lowercase__ : Tuple = []
else:
lowercase__ : str = {c: []}
lowercase__ : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowercase__ , lowercase__ : int = next_term(lowerCamelCase__ , k - 1 , i + dn , lowerCamelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowercase__ , lowercase__ : List[Any] = compute(lowerCamelCase__ , lowerCamelCase__ , i + dn , lowerCamelCase__ )
diff += _diff
dn += terms_jumped
lowercase__ : Optional[int] = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowercase__ : List[Any] = 0
while j < len(lowerCamelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase__ , (diff, dn, k) )
return (diff, dn)
def compute(a_i, k, i, n):
"""simple docstring"""
if i >= n:
return 0, i
if k > len(lowerCamelCase__ ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowercase__ : Dict = i
lowercase__ , lowercase__ , lowercase__ : Tuple = 0, 0, 0
for j in range(len(lowerCamelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowercase__ : Optional[Any] = ds_c + ds_b
diff += addend
lowercase__ : Optional[Any] = 0
for j in range(lowerCamelCase__ ):
lowercase__ : List[str] = a_i[j] + addend
lowercase__ , lowercase__ : Tuple = divmod(lowerCamelCase__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return diff, i - start_i
def add(digits, k, addend):
"""simple docstring"""
for j in range(lowerCamelCase__ , len(lowerCamelCase__ ) ):
lowercase__ : Optional[int] = digits[j] + addend
if s >= 10:
lowercase__ , lowercase__ : Optional[Any] = divmod(lowerCamelCase__ , 10 )
lowercase__ : str = addend // 10 + quotient
else:
lowercase__ : Any = s
lowercase__ : Union[str, Any] = addend // 10
if addend == 0:
break
while addend > 0:
lowercase__ , lowercase__ : Union[str, Any] = divmod(lowerCamelCase__ , 10 )
digits.append(lowerCamelCase__ )
def solution(n: int = 10**15) -> int:
"""simple docstring"""
lowercase__ : List[Any] = [1]
lowercase__ : Tuple = 1
lowercase__ : str = 0
while True:
lowercase__ , lowercase__ : Union[str, Any] = next_term(lowerCamelCase__ , 20 , i + dn , lowerCamelCase__ )
dn += terms_jumped
if dn == n - i:
break
lowercase__ : Tuple = 0
for j in range(len(lowerCamelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
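# A hedged reading of the driver above: it iterates the digit-sum recurrence
# a(1) = 1, a(n+1) = a(n) + digitsum(a(n)), storing a(n) as a little-endian
# digit list and using the memoised `jumps` table to skip many terms at once.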
if __name__ == "__main__":
print(f'''{solution() = }''')
| 700 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(BaseImageProcessor ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__ : List[Any] = (old_width // size + 1) * size - old_width
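# Pads each spatial dim up to the next multiple of `size`; note that an exact
# multiple still gains one full extra block (e.g. height 120, size 8 -> 128).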
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 81 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def a ( UpperCamelCase_ : Union[tf.Tensor, np.ndarray] ) -> List[int]:
if isinstance(UpperCamelCase_ , np.ndarray ):
return list(tensor.shape )
snake_case__ =tf.shape(UpperCamelCase_ )
if tensor.shape == tf.TensorShape(UpperCamelCase_ ):
return dynamic
snake_case__ =tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCamelCase_ )]
def a ( UpperCamelCase_ : tf.Tensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[str] = None ) -> tf.Tensor:
return tf.nn.softmax(logits=logits + 1e-9 , axis=UpperCamelCase_ , name=UpperCamelCase_ )
def a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str=1e-5 , UpperCamelCase_ : List[Any]=-1 ) -> List[Any]:
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
snake_case__ , snake_case__ =tf.nn.moments(UpperCamelCase_ , axes=[axis] , keepdims=UpperCamelCase_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
snake_case__ =[1] * inputs.shape.rank
snake_case__ =shape_list(UpperCamelCase_ )[axis]
snake_case__ =tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
snake_case__ =tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
# Compute layer normalization using the batch_normalization
# function.
snake_case__ =tf.nn.batch_normalization(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , offset=UpperCamelCase_ , scale=UpperCamelCase_ , variance_epsilon=UpperCamelCase_ , )
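# A rough equivalence sketch: with 1D weight/bias this computes
#   outputs = weight * (inputs - mean) / sqrt(variance + epsilon) + bias
# i.e. the same result torch.nn.functional.layer_norm gives over this axis.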
return outputs
def a ( UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=0 , UpperCamelCase_ : int=-1 ) -> int:
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
snake_case__ =tf.shape(UpperCamelCase_ )
snake_case__ =tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
snake_case__ =tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
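# e.g. an input of shape (2, 3, 4) with start_dim=1, end_dim=-1 collapses
# the trailing axes into one, producing shape (2, 12).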
return tf.reshape(UpperCamelCase_ , UpperCamelCase_ )
def a ( UpperCamelCase_ : tf.Tensor ) -> tf.Tensor:
if not isinstance(UpperCamelCase_ , tf.Tensor ):
snake_case__ =tf.convert_to_tensor(UpperCamelCase_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
snake_case__ =encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
snake_case__ =encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
snake_case__ =(
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
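# e.g. a keep-mask [1, 1, 0] becomes additive biases [0, 0, dtype.min],
# which attention layers add to raw scores to mask out padded positions.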
return encoder_extended_attention_mask
def a ( UpperCamelCase_ : tf.Tensor , UpperCamelCase_ : int , UpperCamelCase_ : str = "input_ids" ) -> None:
tf.debugging.assert_less(
UpperCamelCase_ , tf.cast(UpperCamelCase_ , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCamelCase_ )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def a ( UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Optional[Any]:
snake_case__ =64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
snake_case__ =[x for x in data if len(UpperCamelCase_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
snake_case__ =np.asarray(UpperCamelCase_ )
snake_case__ =1
snake_case__ =np.array_split(UpperCamelCase_ , UpperCamelCase_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
snake_case__ =np.array_split(UpperCamelCase_ , UpperCamelCase_ )
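# The loader below expects chunked attributes under keys of the form
# "%s%d" % (name, chunk_id), probing name0, name1, ... until one is missing.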
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCamelCase_ ):
snake_case__ =chunk_data
else:
snake_case__ =data
def a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict ) -> Optional[int]:
if name in group.attrs:
snake_case__ =[n.decode('utf8' ) if hasattr(UpperCamelCase_ , 'decode' ) else n for n in group.attrs[name]]
else:
snake_case__ =[]
snake_case__ =0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(UpperCamelCase_ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def a ( UpperCamelCase_ : List[str] ) -> List[Any]:
def _expand_single_ad_tensor(UpperCamelCase_ : Dict ):
if isinstance(UpperCamelCase_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCamelCase_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCamelCase_ )
| 538 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class a__( TokenizerTesterMixin , unittest.TestCase ):
a_ : str = BertJapaneseTokenizer
a_ : List[str] = False
a_ : List[Any] = True
def _lowercase ( self ) -> Union[str, Any]:
super().setUp()
snake_case__ =[
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
snake_case__ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _lowercase ( self , _UpperCAmelCase ) -> str:
snake_case__ ='こんにちは、世界。 \nこんばんは、世界。'
snake_case__ ='こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def _lowercase ( self , _UpperCAmelCase ) -> Any:
snake_case__ , snake_case__ =self.get_input_output_texts(_UpperCAmelCase )
snake_case__ =tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
snake_case__ =tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def _lowercase ( self ) -> Dict:
pass # TODO add if relevant
def _lowercase ( self ) -> List[str]:
pass # TODO add if relevant
def _lowercase ( self ) -> str:
pass # TODO add if relevant
def _lowercase ( self ) -> Any:
snake_case__ =self.tokenizer_class(self.vocab_file )
snake_case__ =tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(_UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def _lowercase ( self ) -> List[Any]:
snake_case__ =self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(_UpperCAmelCase )
snake_case__ ='こんにちは、世界。\nこんばんは、世界。'
snake_case__ =tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case__ =os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_UpperCAmelCase , 'wb' ) as handle:
pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(_UpperCAmelCase , 'rb' ) as handle:
snake_case__ =pickle.load(_UpperCAmelCase )
snake_case__ =tokenizer_new.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowercase ( self ) -> Any:
snake_case__ =MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _lowercase ( self ) -> str:
try:
snake_case__ =MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _lowercase ( self ) -> List[str]:
try:
snake_case__ =MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _lowercase ( self ) -> int:
snake_case__ =MecabTokenizer(do_lower_case=_UpperCAmelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def _lowercase ( self ) -> List[Any]:
try:
snake_case__ =MecabTokenizer(
do_lower_case=_UpperCAmelCase , normalize_text=_UpperCAmelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def _lowercase ( self ) -> List[Any]:
snake_case__ =MecabTokenizer(normalize_text=_UpperCAmelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def _lowercase ( self ) -> List[Any]:
snake_case__ =self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(_UpperCAmelCase )
snake_case__ ='こんにちは、世界。\nこんばんは、世界。'
snake_case__ =tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case__ =os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_UpperCAmelCase , 'wb' ) as handle:
pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(_UpperCAmelCase , 'rb' ) as handle:
snake_case__ =pickle.load(_UpperCAmelCase )
snake_case__ =tokenizer_new.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_sudachi
def _lowercase ( self ) -> Union[str, Any]:
snake_case__ =SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def _lowercase ( self ) -> List[Any]:
snake_case__ =SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def _lowercase ( self ) -> Tuple:
snake_case__ =SudachiTokenizer(do_lower_case=_UpperCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def _lowercase ( self ) -> Dict:
snake_case__ =SudachiTokenizer(normalize_text=_UpperCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def _lowercase ( self ) -> Optional[int]:
snake_case__ =SudachiTokenizer(trim_whitespace=_UpperCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(_UpperCAmelCase )
snake_case__ ='こんにちは、世界。\nこんばんは、世界。'
snake_case__ =tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case__ =os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_UpperCAmelCase , 'wb' ) as handle:
pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(_UpperCAmelCase , 'rb' ) as handle:
snake_case__ =pickle.load(_UpperCAmelCase )
snake_case__ =tokenizer_new.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_jumanpp
def _lowercase ( self ) -> int:
snake_case__ =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _lowercase ( self ) -> Tuple:
snake_case__ =JumanppTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _lowercase ( self ) -> Tuple:
snake_case__ =JumanppTokenizer(normalize_text=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def _lowercase ( self ) -> int:
snake_case__ =JumanppTokenizer(trim_whitespace=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def _lowercase ( self ) -> str:
snake_case__ =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
snake_case__ ={}
for i, token in enumerate(_UpperCAmelCase ):
snake_case__ =i
snake_case__ =WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def _lowercase ( self ) -> Tuple:
snake_case__ =BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
snake_case__ =tokenizer.subword_tokenizer
snake_case__ =subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(_UpperCAmelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
snake_case__ =subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(_UpperCAmelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
snake_case__ =tokenizer.encode('ありがとう。' , add_special_tokens=_UpperCAmelCase )
snake_case__ =tokenizer.encode('どういたしまして。' , add_special_tokens=_UpperCAmelCase )
snake_case__ =tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
snake_case__ =tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( TokenizerTesterMixin , unittest.TestCase ):
a_ : int = BertJapaneseTokenizer
a_ : Optional[Any] = False
def _lowercase ( self ) -> Any:
super().setUp()
snake_case__ =['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
snake_case__ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _lowercase ( self , **_UpperCAmelCase ) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **_UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase ) -> Optional[Any]:
snake_case__ ='こんにちは、世界。 \nこんばんは、世界。'
snake_case__ ='こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def _lowercase ( self ) -> List[Any]:
pass # TODO add if relevant
def _lowercase ( self ) -> Union[str, Any]:
pass # TODO add if relevant
def _lowercase ( self ) -> Tuple:
pass # TODO add if relevant
def _lowercase ( self ) -> Any:
snake_case__ =self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
snake_case__ =tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
_UpperCAmelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
snake_case__ ={}
for i, token in enumerate(_UpperCAmelCase ):
snake_case__ =i
snake_case__ =CharacterTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def _lowercase ( self ) -> Any:
snake_case__ =self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
snake_case__ =tokenizer.encode('ありがとう。' , add_special_tokens=_UpperCAmelCase )
snake_case__ =tokenizer.encode('どういたしまして。' , add_special_tokens=_UpperCAmelCase )
snake_case__ =tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
snake_case__ =tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class a__( unittest.TestCase ):
def _lowercase ( self ) -> Optional[Any]:
snake_case__ ='cl-tohoku/bert-base-japanese'
snake_case__ =AutoTokenizer.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
class a__( unittest.TestCase ):
def _lowercase ( self ) -> int:
snake_case__ ='cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(_UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
snake_case__ ='bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(_UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 538 | 1 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if `n` uses each of the digits 1-9 exactly once."""
    s = str(n)
    return len(s) == 9 and set(s) == set('''123456789''')


def solution() -> int | None:
    # concat(n, 2n) for a 4-digit n: candidate = n * 10**5 + 2 * n
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # concat(n, 2n, 3n) for a 3-digit n: candidate = n * 10**6 + 2n * 10**3 + 3n
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
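# Worked example: base_num = 9327 gives 100_002 * 9327 = 932718654, the
# concatenation of 9327 and 2 * 9327 = 18654, and 932718654 is 9-pandigital.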
if __name__ == "__main__":
print(f'''{solution() = }''')
| 107 | import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1_024, type_path="val", n_obs=None, fpaa=False, task="summarization", local_rank=None, num_return_sequences: int = 1, dataset_kwargs: Dict = None, prefix: str = "", **generate_kwargs) -> Dict:
SCREAMING_SNAKE_CASE__: List[str]= str(snake_case_ )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=snake_case_ )
SCREAMING_SNAKE_CASE__: str= Path(snake_case_ )
SCREAMING_SNAKE_CASE__: Tuple= save_dir.joinpath(F'rank_{local_rank}_output.json' )
torch.cuda.set_device(snake_case_ )
SCREAMING_SNAKE_CASE__: Tuple= AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda()
if fpaa:
SCREAMING_SNAKE_CASE__: Dict= model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case_ , snake_case_ ) # update config with task specific params
SCREAMING_SNAKE_CASE__: List[Any]= generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
SCREAMING_SNAKE_CASE__: str= num_return_sequences
SCREAMING_SNAKE_CASE__: Optional[int]= AutoTokenizer.from_pretrained(snake_case_ )
logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
SCREAMING_SNAKE_CASE__: Dict= tokenizer.model_max_length
if prefix is None:
SCREAMING_SNAKE_CASE__: List[Any]= prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
SCREAMING_SNAKE_CASE__: Optional[int]= SeqaSeqDataset(
snake_case_ , snake_case_ , snake_case_ , max_target_length=1_024 , type_path=snake_case_ , n_obs=snake_case_ , prefix=snake_case_ , **snake_case_ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
SCREAMING_SNAKE_CASE__: Union[str, Any]= ds.make_sortish_sampler(snake_case_ , distributed=snake_case_ , add_extra_examples=snake_case_ , shuffle=snake_case_ )
SCREAMING_SNAKE_CASE__: List[Any]= DataLoader(snake_case_ , sampler=snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn )
SCREAMING_SNAKE_CASE__: List[Any]= []
for batch in tqdm(snake_case_ ):
SCREAMING_SNAKE_CASE__: List[Any]= model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=snake_case_ , num_beams=snake_case_ , **snake_case_ , )
SCREAMING_SNAKE_CASE__: int= tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
SCREAMING_SNAKE_CASE__: int= batch['''ids''']
if num_return_sequences > 1:
SCREAMING_SNAKE_CASE__: List[Any]= chunks(snake_case_ , snake_case_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case_ ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(snake_case_ , snake_case_ )
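# Each rank persists its partial predictions to rank_<local_rank>_output.json;
# rank 0 later collects every shard via gather_results_from_each_node.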
return results, sampler.num_replicas
def run_generate():
SCREAMING_SNAKE_CASE__: List[Any]= argparse.ArgumentParser(
epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=snake_case_ , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=snake_case_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=snake_case_ , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=snake_case_ , default=snake_case_ )
parser.add_argument(
'''--type_path''' , type=snake_case_ , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
parser.add_argument('''--task''' , type=snake_case_ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=snake_case_ , default=8 , required=snake_case_ , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=snake_case_ , default=-1 , required=snake_case_ , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=snake_case_ , default=1 , required=snake_case_ , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=snake_case_ , default=600 , required=snake_case_ , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=snake_case_ , default=snake_case_ , required=snake_case_ )
parser.add_argument('''--tgt_lang''' , type=snake_case_ , default=snake_case_ , required=snake_case_ )
parser.add_argument(
'''--prefix''' , type=snake_case_ , required=snake_case_ , default=snake_case_ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
SCREAMING_SNAKE_CASE__: Dict= time.time()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= parser.parse_known_args()
SCREAMING_SNAKE_CASE__: Tuple= parse_numeric_n_bool_cl_kwargs(snake_case_ )
if generate_kwargs and args.local_rank <= 0:
print(F'parsed the following generate kwargs: {generate_kwargs}' )
SCREAMING_SNAKE_CASE__: List[Any]= Path(args.save_dir + '''_tmp''' )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking.
SCREAMING_SNAKE_CASE__: Dict= list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(F'Found files at {json_save_dir} please move or remove them.' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
SCREAMING_SNAKE_CASE__: List[str]= {}
if args.src_lang is not None:
SCREAMING_SNAKE_CASE__: str= args.src_lang
if args.tgt_lang is not None:
SCREAMING_SNAKE_CASE__: Any= args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= eval_data_dir(
args.data_dir , snake_case_ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=snake_case_ , **snake_case_ , )
if args.local_rank <= 0:
SCREAMING_SNAKE_CASE__: str= Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case_ )
SCREAMING_SNAKE_CASE__: Optional[Any]= gather_results_from_each_node(snake_case_ , snake_case_ , args.sync_timeout )
SCREAMING_SNAKE_CASE__: Union[str, Any]= combine_partial_results(snake_case_ )
if args.num_return_sequences > 1:
SCREAMING_SNAKE_CASE__: Optional[Any]= save_dir.joinpath('''pseudolabel_results.json''' )
print(F'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(snake_case_ , snake_case_ )
return
SCREAMING_SNAKE_CASE__: Union[str, Any]= Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(snake_case_ ) as f:
SCREAMING_SNAKE_CASE__: List[str]= [x.rstrip() for x in f.readlines()][: len(snake_case_ )]
# Calculate metrics, save metrics, and save _generations.txt
SCREAMING_SNAKE_CASE__: Optional[int]= '''translation''' in args.task
SCREAMING_SNAKE_CASE__: str= calculate_bleu if calc_bleu else calculate_rouge
SCREAMING_SNAKE_CASE__: Dict= '''bleu''' if calc_bleu else '''rouge'''
SCREAMING_SNAKE_CASE__: Dict= score_fn(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE__: List[str]= len(snake_case_ )
SCREAMING_SNAKE_CASE__: Dict= time.time() - start_time
SCREAMING_SNAKE_CASE__: List[str]= round(runtime / metrics['''n_obs'''] , 4 )
SCREAMING_SNAKE_CASE__: str= num_replicas
# TODO(@stas00): add whatever metadata to metrics
SCREAMING_SNAKE_CASE__: str= save_dir.joinpath(F'{args.type_path}_{metric_name}.json' )
save_json(snake_case_ , snake_case_ , indent=snake_case_ )
print(snake_case_ )
write_txt_file(snake_case_ , save_dir.joinpath(F'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(snake_case_ , save_dir.joinpath(F'{args.type_path}.target' ) )
else:
shutil.rmtree(snake_case_ )
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    # sort by example id so predictions line up with the original dataset order,
    # e.g. [[{'pred': 'b', 'id': 1}], [{'pred': 'a', 'id': 0}]] -> ['a', 'b']
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['''pred'''] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json'''))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''')
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 107 | 1 |
import inspect
import unittest
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def SCREAMING_SNAKE_CASE ( self ):
import diffusers
from diffusers.dependency_versions_table import deps
all_classes = inspect.getmembers(diffusers, inspect.isclass)
for cls_name, cls_module in all_classes:
    if "dummy_" in cls_module.__module__:
        for backend in cls_module._backends:
            # map backend identifiers to their pip package names
            if backend == "k_diffusion":
                backend = "k-diffusion"
            elif backend == "invisible_watermark":
                backend = "invisible-watermark"
            assert backend in deps, F"{backend} is not in the deps table!"
| 408 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _a ( ProcessorMixin ):
"""simple docstring"""
snake_case ="""EncodecFeatureExtractor"""
snake_case =("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , _snake_case , _snake_case ):
super().__init__(_snake_case , _snake_case )
_UpperCAmelCase =self.feature_extractor
_UpperCAmelCase =False
def SCREAMING_SNAKE_CASE ( self , _snake_case=None , _snake_case=None , _snake_case=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_snake_case , language=_snake_case , no_timestamps=_snake_case )
def __call__( self , *_snake_case , **_snake_case ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_snake_case , **_snake_case )
_UpperCAmelCase =kwargs.pop("audio" , _snake_case )
_UpperCAmelCase =kwargs.pop("sampling_rate" , _snake_case )
_UpperCAmelCase =kwargs.pop("text" , _snake_case )
if len(args ) > 0:
_UpperCAmelCase =args[0]
_UpperCAmelCase =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
_UpperCAmelCase =self.tokenizer(_snake_case , **_snake_case )
if audio is not None:
_UpperCAmelCase =self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_UpperCAmelCase =audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
_UpperCAmelCase =audio_inputs["padding_mask"]
return inputs
def SCREAMING_SNAKE_CASE ( self , *_snake_case , **_snake_case ):
_UpperCAmelCase =kwargs.pop("audio" , _snake_case )
_UpperCAmelCase =kwargs.pop("padding_mask" , _snake_case )
if len(args ) > 0:
_UpperCAmelCase =args[0]
_UpperCAmelCase =args[1:]
if audio_values is not None:
return self._decode_audio(_snake_case , padding_mask=_snake_case )
else:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE ( self , *_snake_case , **_snake_case ):
return self.tokenizer.decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = None ):
_UpperCAmelCase =to_numpy(_snake_case )
bsz , channels , seq_len =audio_values.shape
if padding_mask is None:
return list(_snake_case )
_UpperCAmelCase =to_numpy(_snake_case )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_UpperCAmelCase =seq_len - padding_mask.shape[-1]
_UpperCAmelCase =1 - self.feature_extractor.padding_value
_UpperCAmelCase =np.pad(_snake_case , ((0, 0), (0, difference)) , "constant" , constant_values=_snake_case )
_UpperCAmelCase =audio_values.tolist()
for i in range(bsz ):
_UpperCAmelCase =np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_UpperCAmelCase =sliced_audio.reshape(channels , -1 )
return audio_values
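# Illustrative usage (assumed, not from the original file): decoding a batch of
# generated waveforms while stripping the positions the feature extractor padded.
#
#     # audio_values: hypothetical array of shape (bsz, channels, seq_len)
#     # waveforms = processor.batch_decode(audio=audio_values, padding_mask=inputs["padding_mask"])
#     # each element of `waveforms` has that example's padded tail removed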
| 408 | 1 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowercase :
@staticmethod
def UpperCamelCase__ ( *A_ , **A_ ) ->Any:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowercase (unittest.TestCase ):
_UpperCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCamelCase__ ( self , A_ , A_ , A_ ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__lowerCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def UpperCamelCase__ ( self , A_ , A_ ) ->Tuple:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = object_detector(examples[0] , threshold=0.0 )
__lowerCAmelCase : int = len(A_ )
self.assertGreater(A_ , 0 )
self.assertEqual(
A_ , [
{
'''score''': ANY(A_ ),
'''label''': ANY(A_ ),
'''box''': {'''xmin''': ANY(A_ ), '''ymin''': ANY(A_ ), '''xmax''': ANY(A_ ), '''ymax''': ANY(A_ )},
}
for i in range(A_ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
pass
@require_torch
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__lowerCAmelCase : Union[str, Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
__lowerCAmelCase : List[Any] = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = pipeline('''zero-shot-object-detection''' )
__lowerCAmelCase : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
__lowerCAmelCase : Union[str, Any] = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
pass
@require_torch
@slow
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = 0.2
__lowerCAmelCase : Tuple = pipeline('''zero-shot-object-detection''' )
__lowerCAmelCase : Dict = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=A_ , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : Dict = 2
__lowerCAmelCase : List[Any] = pipeline('''zero-shot-object-detection''' )
__lowerCAmelCase : Tuple = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=A_ , )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
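# Illustrative note (not part of the original module): installing a _LazyModule
# in sys.modules defers the heavy torch-dependent imports, so a statement such as
#
#     # from transformers.models.luke import LukeModel   # assumed import path
#
# only loads modeling_luke when the attribute is first accessed.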
| 583 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = filter(lambda lowercase__ : p.requires_grad , model.parameters() )
lowerCAmelCase__ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
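# Illustrative usage (assumed): only parameters with requires_grad=True are
# counted, so e.g. a model with frozen embeddings reports fewer trainable params.
#
#     # n_trainable = count_trainable_parameters(model)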
_UpperCAmelCase : List[str] = logging.getLogger(__name__)
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Any ) -> str:
'''simple docstring'''
if metric == "rouge2":
lowerCAmelCase__ = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
lowerCAmelCase__ = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
lowerCAmelCase__ = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
lowerCAmelCase__ = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
''' function.''' )
lowerCAmelCase__ = ModelCheckpoint(
dirpath=lowercase__ , filename=lowercase__ , monitor=f'val_{metric}' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
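# Illustrative note (hypothetical values): with metric="rouge2", the callback
# above keeps a single best checkpoint whose filename encodes the score, e.g.
#
#     # val_avg_rouge2=0.2134 at step_count=500  ->  "0.2134-500.ckpt"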
def lowerCAmelCase_ (lowercase__ : int , lowercase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
return EarlyStopping(
monitor=f'val_{metric}' , mode='''min''' if '''loss''' in metric else '''max''' , patience=lowercase__ , verbose=lowercase__ , )
class lowerCAmelCase_ ( pl.Callback ):
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase__ = {f'lr_group_{i}': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE_ )
@rank_zero_only
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : pl.Trainer , SCREAMING_SNAKE_CASE_ : pl.LightningModule , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str]=True ):
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
lowerCAmelCase__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
lowerCAmelCase__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCAmelCase__ = od / '''test_results.txt'''
lowerCAmelCase__ = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCAmelCase__ = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
lowerCAmelCase__ = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
generations_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , '''a+''' ) as writer:
for key in sorted(SCREAMING_SNAKE_CASE_ ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCAmelCase__ = metrics[key]
if isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ):
lowerCAmelCase__ = val.item()
lowerCAmelCase__ = f'{key}: {val:.6f}\n'
writer.write(SCREAMING_SNAKE_CASE_ )
if not save_generations:
return
if "preds" in metrics:
lowerCAmelCase__ = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(SCREAMING_SNAKE_CASE_ )
@rank_zero_only
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
try:
lowerCAmelCase__ = pl_module.model.model.num_parameters()
except AttributeError:
lowerCAmelCase__ = pl_module.model.num_parameters()
lowerCAmelCase__ = count_trainable_parameters(SCREAMING_SNAKE_CASE_ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : pl.Trainer , SCREAMING_SNAKE_CASE_ : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''test''' )
@rank_zero_only
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : pl.Trainer , SCREAMING_SNAKE_CASE_ : str ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 668 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_UpperCAmelCase : int = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
UpperCamelCase_ :Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
UpperCamelCase_ :Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
UpperCamelCase_ :int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
UpperCamelCase_ :Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
UpperCamelCase_ :Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
UpperCamelCase_ :Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'A csv or a json file containing the training data.'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
UpperCamelCase_ :Optional[str] = field(default=snake_case__ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __snake_case ( self : Union[str, Any] ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCAmelCase__ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCAmelCase__ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_ :
UpperCamelCase_ :str = field(
default=snake_case__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
UpperCamelCase_ :str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowerCAmelCase_ () -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase__ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase__ = data_args.train_file.split('''.''' )[-1]
lowerCAmelCase__ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase__ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCAmelCase__ = load_dataset('''csv''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase__ = load_dataset('''json''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase__ = raw_datasets['''train'''].features['''label'''].names
lowerCAmelCase__ = len(lowercase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase__ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase__ , )
lowerCAmelCase__ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase__ = {'''Refused''': 0, '''Entailed''': 1}
lowerCAmelCase__ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
lowerCAmelCase__ = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase__ : Any ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase__ : Dict ):
lowerCAmelCase__ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCAmelCase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
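# Illustrative example (hypothetical data): table_text is a "#"-delimited flat
# table whose first row carries the column names, e.g.
#
#     # "col1#col2\na#b\nc#d"  ->  DataFrame(columns=["col1", "col2"], rows=[["a", "b"], ["c", "d"]])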
lowerCAmelCase__ = examples['''statement''']
lowerCAmelCase__ = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowerCAmelCase__ = tokenizer(lowercase__ , lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ )
lowerCAmelCase__ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCAmelCase__ = raw_datasets.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCAmelCase__ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCAmelCase__ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCAmelCase__ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCAmelCase__ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCAmelCase__ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase__ ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase__ : EvalPrediction ):
lowerCAmelCase__ = p.predictions[0] if isinstance(p.predictions , lowercase__ ) else p.predictions
lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ = default_data_collator
elif training_args.fp16:
lowerCAmelCase__ = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ = None
# Initialize our Trainer
lowerCAmelCase__ = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase__ , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=lowercase__ )
lowerCAmelCase__ = train_result.metrics
lowerCAmelCase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , lowercase__ )
trainer.save_metrics('''train''' , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase__ = trainer.evaluate(eval_dataset=lowercase__ )
lowerCAmelCase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('''eval''' , lowercase__ )
trainer.save_metrics('''eval''' , lowercase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCAmelCase__ = predict_dataset.remove_columns('''label''' )
lowerCAmelCase__ = trainer.predict(lowercase__ , metric_key_prefix='''predict''' ).predictions
lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
lowerCAmelCase__ = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowercase__ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowercase__ ):
lowerCAmelCase__ = label_list[item]
writer.write(f'{index}\t{item}\n' )
lowerCAmelCase__ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 668 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
"""simple docstring"""
def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=99 , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__=None , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def A ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self ) -> Optional[Any]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , use_stable_embedding=lowercase__ , )
def A ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase__ , attention_mask=lowercase__ )
SCREAMING_SNAKE_CASE = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
SCREAMING_SNAKE_CASE = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , )
SCREAMING_SNAKE_CASE = model(lowercase__ , attention_mask=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , use_cache=lowercase__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , output_hidden_states=lowercase__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , past_key_values=lowercase__ , output_hidden_states=lowercase__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
def A ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Any = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCAmelCase_ : Optional[Any] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCAmelCase_ : Union[str, Any] = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : List[Any] = False
def A ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def A ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def A ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*lowercase__ )
def A ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowercase__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowercase__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowercase__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def A ( self ) -> int:
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def A ( self , lowercase__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 10] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(lowercase__ )
original_model.to(lowercase__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(lowercase__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(lowercase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(lowercase__ )
scaled_model.to(lowercase__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(lowercase__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(lowercase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase__ , lowercase__ , atol=1E-5 ) )
| 406 |
"""simple docstring"""
import random
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], [], []
for element in data:
if element < pivot:
less.append(SCREAMING_SNAKE_CASE_ )
elif element > pivot:
greater.append(SCREAMING_SNAKE_CASE_ )
else:
equal.append(SCREAMING_SNAKE_CASE_ )
return less, equal, greater
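# Illustrative example (not in the original): three-way partition around a pivot.
#
#     # _partition([3, 1, 3, 5, 2], 3)  ->  ([1, 2], [3, 3], [5])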
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
# index = len(items) // 2 selects the median (the value at that index once items is sorted)
# invalid input
if index >= len(SCREAMING_SNAKE_CASE_ ) or index < 0:
return None
SCREAMING_SNAKE_CASE = items[random.randint(0, len(SCREAMING_SNAKE_CASE_ ) - 1 )]
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = _partition(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# must be in larger
else:
return quick_select(SCREAMING_SNAKE_CASE_, index - (m + count) )
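# Illustrative usage (assumed): selecting the median without fully sorting.
#
#     # items = [2, 7, 4, 1, 9]
#     # quick_select(items, len(items) // 2)  ->  4   (median of the sorted list)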
| 406 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
SCREAMING_SNAKE_CASE: Optional[int] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def _a ( lowerCAmelCase )-> str:
for pegasus_name, hf_name in PATTERNS:
k = k.replace(pegasus_name , hf_name )
return k
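# Illustrative example (hypothetical key, not from a real checkpoint): applying
# the PATTERNS above left to right,
#
#     # "decoder/layer_0/self_attention/output_proj/kernel"
#     #   ->  "decoder.layers.0.self_attn.out_proj.weight"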
def _a ( lowerCAmelCase , lowerCAmelCase )-> PegasusForConditionalGeneration:
SCREAMING_SNAKE_CASE_ = DEFAULTS.copy()
cfg_kwargs.update(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = PegasusConfig(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = PegasusForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE_ = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE_ = rename_state_dict_key(lowerCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE_ = v.T
SCREAMING_SNAKE_CASE_ = torch.tensor(lowerCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
SCREAMING_SNAKE_CASE_ = {k: torch.zeros_like(lowerCAmelCase ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch_model.model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def _a ( lowerCAmelCase="./ckpt/aeslc/model.ckpt-32000" )-> Dict:
SCREAMING_SNAKE_CASE_ = tf.train.list_variables(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = ['Adafactor', 'global_step']
for name, shape in tqdm(lowerCAmelCase , desc='converting tf checkpoint to dict' ):
SCREAMING_SNAKE_CASE_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE_ = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = array
return tf_weights
def _a ( lowerCAmelCase , lowerCAmelCase )-> Optional[Any]:
# save tokenizer first
SCREAMING_SNAKE_CASE_ = Path(lowerCAmelCase ).parent.name
SCREAMING_SNAKE_CASE_ = task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
SCREAMING_SNAKE_CASE_ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=lowerCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowerCAmelCase )
# convert model
SCREAMING_SNAKE_CASE_ = get_tf_weights_as_numpy(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
SCREAMING_SNAKE_CASE_ = task_specific_params
SCREAMING_SNAKE_CASE_ = convert_pegasus(lowerCAmelCase , lowerCAmelCase )
torch_model.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(lowerCAmelCase , Path(lowerCAmelCase ) / 'pytorch_model.bin' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
SCREAMING_SNAKE_CASE: int = parser.parse_args()
if args.save_dir is None:
SCREAMING_SNAKE_CASE: List[Any] = Path(args.tf_ckpt_path).parent.name
SCREAMING_SNAKE_CASE: Tuple = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 360 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE: Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class lowercase_ (SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , **snake_case__ : str ):
"""simple docstring"""
super().__init__(**snake_case__ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : str , snake_case__ : Union[str, List[str], "Image", List["Image"]] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
return super().__call__(snake_case__ , **snake_case__ )
def __a ( self : str , **snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE_ = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE_ = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __a ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[Any]=None , snake_case__ : List[str]="This is a photo of {}." ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_image(snake_case__ )
SCREAMING_SNAKE_CASE_ = self.image_processor(images=[image] , return_tensors=self.framework )
SCREAMING_SNAKE_CASE_ = candidate_labels
SCREAMING_SNAKE_CASE_ = [hypothesis_template.format(snake_case__ ) for x in candidate_labels]
SCREAMING_SNAKE_CASE_ = self.tokenizer(snake_case__ , return_tensors=self.framework , padding=snake_case__ )
SCREAMING_SNAKE_CASE_ = [text_inputs]
return inputs
def __a ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = model_inputs.pop('candidate_labels' )
SCREAMING_SNAKE_CASE_ = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , UserDict ):
SCREAMING_SNAKE_CASE_ = text_inputs[0]
else:
# Batching case.
SCREAMING_SNAKE_CASE_ = text_inputs[0][0]
SCREAMING_SNAKE_CASE_ = self.model(**snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE_ = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = model_outputs.pop('candidate_labels' )
SCREAMING_SNAKE_CASE_ = model_outputs['logits'][0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ = logits.softmax(dim=-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE_ = probs.tolist()
if not isinstance(scores , list ):
SCREAMING_SNAKE_CASE_ = [scores]
elif self.framework == "tf":
SCREAMING_SNAKE_CASE_ = stable_softmax(snake_case__ , axis=-1 )
SCREAMING_SNAKE_CASE_ = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
SCREAMING_SNAKE_CASE_ = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(snake_case__ , snake_case__ ) , key=lambda x : -x[0] )
]
return result
| 360 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (7_2_0, 1_2_8_0) # Height, Width
SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_0_0
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 2_5_0
def __UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase: Any = get_dataset(LABEL_DIR , IMG_DIR )
for index in range(NUMBER_IMAGES ):
UpperCAmelCase: Optional[Any] = random.sample(range(len(snake_case_ ) ) , 4 )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase: Optional[Any] = update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase: List[Any] = random_chars(3_2 )
UpperCAmelCase: Union[str, Any] = path.split(os.sep )[-1].rsplit("." , 1 )[0]
UpperCAmelCase: Any = F'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cv2.imwrite(F'{file_root}.jpg' , snake_case_ , [cv2.IMWRITE_JPEG_QUALITY, 8_5] )
print(F'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
UpperCAmelCase: int = []
for anno in new_annos:
UpperCAmelCase: List[Any] = anno[3] - anno[1]
UpperCAmelCase: Any = anno[4] - anno[2]
UpperCAmelCase: Optional[int] = anno[1] + width / 2
UpperCAmelCase: Optional[int] = anno[2] + height / 2
UpperCAmelCase: Tuple = F'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(snake_case_ )
with open(F'{file_root}.txt' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str ):
'''simple docstring'''
UpperCAmelCase: int = []
UpperCAmelCase: int = []
for label_file in glob.glob(os.path.join(snake_case_ , "*.txt" ) ):
UpperCAmelCase: Any = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(snake_case_ ) as in_file:
UpperCAmelCase: int = in_file.readlines()
UpperCAmelCase: Union[str, Any] = os.path.join(snake_case_ , F'{label_name}.jpg' )
UpperCAmelCase: Tuple = []
for obj_list in obj_lists:
UpperCAmelCase: Tuple = obj_list.rstrip("\n" ).split(" " )
UpperCAmelCase: List[str] = float(obj[1] ) - float(obj[3] ) / 2
UpperCAmelCase: List[str] = float(obj[2] ) - float(obj[4] ) / 2
UpperCAmelCase: List[str] = float(obj[1] ) + float(obj[3] ) / 2
UpperCAmelCase: List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def __UpperCAmelCase ( snake_case_ : list , snake_case_ : list , snake_case_ : list[int] , snake_case_ : tuple[int, int] , snake_case_ : tuple[float, float] , snake_case_ : float = 0.0 , ):
'''simple docstring'''
    UpperCAmelCase: int = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
UpperCAmelCase: List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCAmelCase: Optional[int] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCAmelCase: List[Any] = int(scale_x * output_size[1] )
UpperCAmelCase: int = int(scale_y * output_size[0] )
UpperCAmelCase: Optional[Any] = []
UpperCAmelCase: Dict = []
for i, index in enumerate(snake_case_ ):
UpperCAmelCase: str = all_img_list[index]
path_list.append(snake_case_ )
UpperCAmelCase: List[Any] = all_annos[index]
        UpperCAmelCase: Optional[int] = cv2.imread(snake_case_ )
if i == 0: # top-left
            UpperCAmelCase: Dict = cv2.resize(snake_case_ , (divid_point_x, divid_point_y) )
UpperCAmelCase: Optional[int] = img
for bbox in img_annos:
UpperCAmelCase: Optional[Any] = bbox[1] * scale_x
UpperCAmelCase: Tuple = bbox[2] * scale_y
UpperCAmelCase: int = bbox[3] * scale_x
UpperCAmelCase: List[str] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
            UpperCAmelCase: Union[str, Any] = cv2.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
UpperCAmelCase: List[Any] = img
for bbox in img_annos:
UpperCAmelCase: Tuple = scale_x + bbox[1] * (1 - scale_x)
UpperCAmelCase: str = bbox[2] * scale_y
UpperCAmelCase: List[str] = scale_x + bbox[3] * (1 - scale_x)
UpperCAmelCase: int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
            UpperCAmelCase: Any = cv2.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
UpperCAmelCase: int = img
for bbox in img_annos:
UpperCAmelCase: Any = bbox[1] * scale_x
UpperCAmelCase: int = scale_y + bbox[2] * (1 - scale_y)
UpperCAmelCase: Union[str, Any] = bbox[3] * scale_x
UpperCAmelCase: Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
            UpperCAmelCase: Optional[int] = cv2.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
UpperCAmelCase: str = img
for bbox in img_annos:
UpperCAmelCase: List[str] = scale_x + bbox[1] * (1 - scale_x)
UpperCAmelCase: List[str] = scale_y + bbox[2] * (1 - scale_y)
UpperCAmelCase: Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
UpperCAmelCase: str = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
UpperCAmelCase: Any = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCAmelCase ( snake_case_ : int ):
'''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
UpperCAmelCase: List[str] = ascii_lowercase + digits
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 711 |
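# A self-contained numeric sketch of the bounding-box remapping performed in
# `update_image_and_anno` above: a box in normalized [0, 1] coordinates from
# the top-right source image is squeezed into the mosaic region
# x in [scale_x, 1], y in [0, scale_y]. The numbers are made up for
# illustration.
scale_x, scale_y = 0.5, 0.5
bbox = [0, 0.2, 0.2, 0.8, 0.8]  # class, xmin, ymin, xmax, ymax (normalized)

xmin = scale_x + bbox[1] * (1 - scale_x)  # 0.5 + 0.2 * 0.5 = 0.6
ymin = bbox[2] * scale_y                  # 0.2 * 0.5 = 0.1
xmax = scale_x + bbox[3] * (1 - scale_x)  # 0.5 + 0.8 * 0.5 = 0.9
ymax = bbox[4] * scale_y                  # 0.8 * 0.5 = 0.4
print([bbox[0], xmin, ymin, xmax, ymax])  # [0, 0.6, 0.1, 0.9, 0.4]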
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case_ : Optional[int] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class __lowerCamelCase ( PretrainedConfig ):
    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']
    def __init__( self , fpn_feature_size = 2_5_6 , mask_feature_size = 2_5_6 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ) -> List[Any]:
        """simple docstring"""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                F'Supported model types: {",".join(self.backbones_supported )}' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type" ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    F'Transformer Decoder {decoder_type} not supported, please use one of'
                    F' {",".join(self.decoders_supported )}' )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs( cls , backbone_config , decoder_config , **kwargs ) -> Tuple:
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""decoder_config"""] = self.decoder_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 166 | 0 |
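# A short sketch of instantiating the config above through the public
# transformers API; the documented fallbacks kick in when no sub-configs are
# passed (Swin backbone, DETR decoder).
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

config = MaskFormerConfig()  # Swin-base backbone + DETR decoder fallbacks
print(type(config.backbone_config).__name__)  # SwinConfig
print(config.decoder_config.model_type)       # "detr"

# Composing a config from explicit sub-configs via the classmethod above.
custom = MaskFormerConfig.from_backbone_and_decoder_configs(
    backbone_config=SwinConfig(), decoder_config=DetrConfig()
)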
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
__snake_case = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class UpperCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self : str , vocab_file : Optional[int]=None , tokenizer_file : Any=None , do_lower_case : Dict=True , unk_token : List[str]="[UNK]" , sep_token : List[Any]="[SEP]" , pad_token : Any="[PAD]" , cls_token : List[str]="[CLS]" , mask_token : Dict="[MASK]" , tokenize_chinese_chars : Union[str, Any]=True , strip_accents : Optional[Any]=None , **kwargs : Union[str, Any] , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self : Tuple , token_ids_0 : Any , token_ids_1 : int=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 386 |
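# A usage sketch for the fast tokenizer above, pointing at the public
# "bert-base-uncased" files listed in the vocab maps.
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("Hello world", "How are you?")
# [CLS] sentence-A tokens [SEP] sentence-B tokens [SEP], with
# token_type_ids 0 for the first segment and 1 for the second.
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])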
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = """This is a test"""
UpperCamelCase = """This is a test"""
return input_text, output_text
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = """<s>"""
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<unk>""" )
        self.assertEqual(vocab_keys[1] , """<s>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(vocab_keys ) , 2_0_0_0 )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        # fmt: off
        self.assertListEqual(
            tokens , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
        # fmt: on
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        texts = ["""This is a test""", """I was born in 92000, and this is falsé."""]
        expected_ids_list = [
            [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
            [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
UpperCamelCase = {"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=__magic_name__ , )
| 386 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : int = tmp_path / 'cache'
_lowercase : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Optional[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Tuple = tmp_path / 'cache'
_lowercase : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Union[str, Any] = features.copy() if features else default_expected_features
_lowercase : Optional[int] = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Tuple = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
_lowercase : Tuple = tmp_path / 'cache'
_lowercase : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Optional[int] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_lowercase : Any = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_lowercase : Optional[Any] = [parquet_path]
_lowercase : Optional[Any] = tmp_path / 'cache'
_lowercase : Union[str, Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Optional[int] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ) -> List[str]:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for split in splits:
_lowercase : Tuple = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : Tuple = tmp_path / 'cache'
_lowercase : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowercase : Union[str, Any] = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Any = tmp_path / 'cache'
_lowercase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Dict = features.copy() if features else default_expected_features
_lowercase : str = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowercase : Dict = ParquetDatasetReader({'train': parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if split:
_lowercase : Optional[int] = {split: parquet_path}
else:
_lowercase : Dict = 'train'
_lowercase : Any = {'train': parquet_path, 'test': parquet_path}
_lowercase : Optional[Any] = tmp_path / 'cache'
_lowercase : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_lowercase : Optional[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : List[Any] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
_lowercase : List[str] = pq.ParquetFile(tmp_path / 'foo.parquet' )
_lowercase : Union[str, Any] = pf.read()
assert dataset.data.table == output_table
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : str = str(shared_datadir / 'test_image_rgb.jpg' )
_lowercase : Union[str, Any] = {'image': [image_path]}
_lowercase : Any = Features({'image': Image()} )
_lowercase : Union[str, Any] = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ )
_lowercase : int = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
_lowercase : int = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
_lowercase : Union[str, Any] = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=SCREAMING_SNAKE_CASE_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
| 700 |
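# A minimal round-trip sketch of the behavior these tests exercise, written
# against the public `datasets` API; `Dataset.to_parquet`/`Dataset.from_parquet`
# wrap the ParquetDatasetWriter/ParquetDatasetReader classes under test.
from datasets import Dataset

ds = Dataset.from_dict(
    {
        "col_1": ["a", "b", "c", "d"],
        "col_2": [0, 1, 2, 3],
        "col_3": [0.0, 1.0, 2.0, 3.0],
    }
)
ds.to_parquet("data.parquet")

reloaded = Dataset.from_parquet("data.parquet")
assert reloaded.num_rows == 4
assert reloaded.column_names == ["col_1", "col_2", "col_3"]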
from __future__ import annotations
from typing import Any
class Graph :
    def __init__( self , num_of_nodes : int ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges : list[list[int]] = []
        self.m_component : dict[int, int] = {}
    def add_edge( self , u_node : int , v_node : int , weight : int ) -> None:
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node : int ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node : int ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size : list[int] , u_node : int , v_node : int ) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ) -> None:
        component_size : list[int] = []
        mst_weight = 0
        minimum_weight_edge : list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def __magic_name__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 0 |
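# A small usage sketch for the Graph class above; the edge weights are made up
# for illustration. Borůvka's algorithm picks edges (2-3, w=4), (0-3, w=5) and
# (0-1, w=10) here, for a total MST weight of 19.
g = Graph(4)
for u_node, v_node, weight in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
    g.add_edge(u_node, v_node, weight)
g.boruvka()  # prints each added edge, then the total MST weight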
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 663 |
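# A hedged sketch of driving the converter above from Python instead of the
# CLI; the checkpoint filename is a placeholder, and only keyword arguments
# that appear in the argparse definitions above are used.
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.ckpt",  # placeholder path
    image_size=512,              # SD v1.x / v2-base resolution
    prediction_type="epsilon",   # "v_prediction" for SD v2 at 768px
    scheduler_type="pndm",
    extract_ema=True,            # EMA weights for inference quality
)
pipe.save_pretrained("converted-sd-pipeline")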
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1_0_2_4}
class lowerCamelCase (PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any="<s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Dict="<unk>", _UpperCAmelCase : Tuple="<pad>", _UpperCAmelCase : int="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : Any, ) -> None:
"""simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE__ : Any = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Optional[int] = monolingual_vocab_file
SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE__ : Dict = cnt
cnt += 1
with open(_UpperCAmelCase, "r", encoding="utf-8" ) as f:
for line in f.readlines():
SCREAMING_SNAKE_CASE__ : int = line.strip().split()[0]
SCREAMING_SNAKE_CASE__ : Tuple = len(self.fairseq_tokens_to_ids )
if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE__ : List[Any] = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self : Any ) -> int:
        """simple docstring"""
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        state["""sp_model_proto"""] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : int, d : Optional[int] ) -> int:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self : List[str], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.fairseq_ids_to_tokens )
def A_ ( self : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : Tuple, _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )
def A_ ( self : List[str], _UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def A_ ( self : List[str], _UpperCAmelCase : str ) -> str:
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def A_ ( self : Optional[Any], _UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip()
return out_string
def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase, "wb" ) as fi:
SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_UpperCAmelCase, "w", encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F'''{str(_UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 663 | 1 |
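# A short usage sketch for the tokenizer above, pointing at the public
# vinai/bartpho-syllable checkpoint referenced in the vocab maps; the sample
# sentence follows the BartPho README.
from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))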
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self : int , transformer : Transformer2DModel , vae : AutoencoderKL , scheduler : KarrasDiffusionSchedulers , id2label : Optional[Dict[int, str]] = None , ) ->Tuple:
        '''simple docstring'''
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler )
        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split("," ):
                    self.labels[label.lstrip().rstrip()] = int(key )
            self.labels = dict(sorted(self.labels.items() ) )
    def get_label_ids( self : Any , label : Union[str, List[str]] ) ->List[int]:
        '''simple docstring'''
        if not isinstance(label , list ):
            label = list(label )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Tuple , lowercase__ : List[int] , lowercase__ : float = 4.0 , lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase__ : int = 50 , lowercase__ : Optional[str] = "pil" , lowercase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
_UpperCamelCase : int = len(lowercase__ )
_UpperCamelCase : List[Any] = self.transformer.config.sample_size
_UpperCamelCase : Dict = self.transformer.config.in_channels
_UpperCamelCase : str = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
_UpperCamelCase : Optional[int] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
_UpperCamelCase : Tuple = torch.tensor(lowercase__ , device=self.device ).reshape(-1 )
_UpperCamelCase : Any = torch.tensor([1_000] * batch_size , device=self.device )
_UpperCamelCase : str = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
_UpperCamelCase : Union[str, Any] = latent_model_input[: len(lowercase__ ) // 2]
_UpperCamelCase : Dict = torch.cat([half, half] , dim=0 )
_UpperCamelCase : Any = self.scheduler.scale_model_input(lowercase__ , lowercase__ )
_UpperCamelCase : Dict = t
if not torch.is_tensor(lowercase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_UpperCamelCase : int = latent_model_input.device.type == "mps"
if isinstance(lowercase__ , lowercase__ ):
                    _UpperCamelCase : str = torch.float32 if is_mps else torch.float64
else:
                    _UpperCamelCase : Union[str, Any] = torch.int32 if is_mps else torch.int64
_UpperCamelCase : Optional[int] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
_UpperCamelCase : Union[str, Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCamelCase : Union[str, Any] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
_UpperCamelCase : int = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__ ).sample
# perform guidance
if guidance_scale > 1:
_UpperCamelCase , _UpperCamelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_UpperCamelCase , _UpperCamelCase : List[Any] = torch.split(lowercase__ , len(lowercase__ ) // 2 , dim=0 )
_UpperCamelCase : Union[str, Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_UpperCamelCase : Union[str, Any] = torch.cat([half_eps, half_eps] , dim=0 )
_UpperCamelCase : str = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_UpperCamelCase , _UpperCamelCase : List[Any] = torch.split(lowercase__ , lowercase__ , dim=1 )
else:
_UpperCamelCase : int = noise_pred
# compute previous image: x_t -> x_t-1
_UpperCamelCase : Optional[Any] = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
if guidance_scale > 1:
_UpperCamelCase , _UpperCamelCase : str = latent_model_input.chunk(2 , dim=0 )
else:
_UpperCamelCase : List[Any] = latent_model_input
_UpperCamelCase : List[Any] = 1 / self.vae.config.scaling_factor * latents
_UpperCamelCase : int = self.vae.decode(lowercase__ ).sample
_UpperCamelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCamelCase : List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCamelCase : Optional[Any] = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__ )
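    # A usage sketch for the class-conditional pipeline above, written against
    # the public diffusers DiTPipeline API; "facebook/DiT-XL-2-256" is the
    # assumed reference checkpoint.
    #
    #     import torch
    #     from diffusers import DiTPipeline
    #
    #     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
    #     pipe = pipe.to("cuda")
    #     # get_label_ids (defined above) maps imagenet names to class ids.
    #     class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    #     images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images
    #     images[0].save("white_shark.png")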
| 204 | '''simple docstring'''
from __future__ import annotations
lowerCAmelCase_ : Optional[Any] = """Muhammad Umer Farooq"""
lowerCAmelCase_ : str = """MIT"""
lowerCAmelCase_ : Optional[Any] = """1.0.0"""
lowerCAmelCase_ : Union[str, Any] = """Muhammad Umer Farooq"""
lowerCAmelCase_ : Any = """contact@muhammadumerfarooq.me"""
lowerCAmelCase_ : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser (HTMLParser ):
    '''simple docstring'''
    def __init__( self : List[Any] , domain : str ) ->None:
        '''simple docstring'''
        super().__init__()
        self.urls : list[str] = []
        self.domain = domain
    def handle_starttag( self , tag : str , attrs : list[tuple[str, str | None]] ) ->None:
        '''simple docstring'''
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name( url : str ) -> str:
    '''simple docstring'''
    return ".".join(get_sub_domain_name(url ).split("." )[-2:] )
def get_sub_domain_name( url : str ) -> str:
    '''simple docstring'''
    return parse.urlparse(url ).netloc
def emails_from_url( url : str = "https://github.com" ) -> list[str]:
    '''simple docstring'''
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url("""https://github.com""")
print(f"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails)))
| 204 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class __SCREAMING_SNAKE_CASE :
def __init__( self : int ):
'''simple docstring'''
lowercase : List[str] =[2, 1, 2, -1]
lowercase : Tuple =[1, 2, 3, 4]
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
lowercase : int =len(self.first_signal )
lowercase : Any =len(self.second_signal )
lowercase : Optional[Any] =max(UpperCAmelCase__ , UpperCAmelCase__ )
# create a zero matrix of max_length x max_length
lowercase : Any =[[0] * max_length for i in range(UpperCAmelCase__ )]
        # fill the smaller signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(UpperCAmelCase__ ):
lowercase : Any =deque(self.second_signal )
rotated_signal.rotate(UpperCAmelCase__ )
for j, item in enumerate(UpperCAmelCase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
lowercase : Optional[Any] =np.matmul(np.transpose(UpperCAmelCase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(UpperCAmelCase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 92 |
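# An independent numeric cross-check of the matrix-based circular convolution
# above, using the FFT identity circ_conv(a, b) = IFFT(FFT(a) * FFT(b)).
import numpy as np

first_signal = np.array([2, 1, 2, -1], dtype=float)
second_signal = np.array([1, 2, 3, 4], dtype=float)
result = np.fft.ifft(np.fft.fft(first_signal) * np.fft.fft(second_signal)).real
print(result.round(2))  # [10. 10.  6. 14.], matching the class defaults above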
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class snake_case__ ( PretrainedConfig ):
UpperCAmelCase : Tuple = """switch_transformers"""
UpperCAmelCase : Optional[int] = ["""past_key_values"""]
UpperCAmelCase : List[Any] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , UpperCamelCase_=32128 , UpperCamelCase_=768 , UpperCamelCase_=64 , UpperCamelCase_=2048 , UpperCamelCase_=64 , UpperCamelCase_=12 , UpperCamelCase_=3 , UpperCamelCase_=12 , UpperCamelCase_=3 , UpperCamelCase_=12 , UpperCamelCase_=8 , UpperCamelCase_=False , UpperCamelCase_=0.01 , UpperCamelCase_="float32" , UpperCamelCase_=False , UpperCamelCase_=32 , UpperCamelCase_=128 , UpperCamelCase_=0.1 , UpperCamelCase_=1e-6 , UpperCamelCase_=0.001 , UpperCamelCase_=0.001 , UpperCamelCase_=1.0 , UpperCamelCase_="relu" , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=0 , UpperCamelCase_=1 , **UpperCamelCase_ , ) -> str:
"""simple docstring"""
a_ : str = vocab_size
a_ : Dict = d_model
a_ : int = d_kv
a_ : Optional[int] = d_ff
a_ : str = num_sparse_encoder_layers
a_ : List[str] = num_layers
a_ : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a_ : Union[str, Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
a_ : str = self.num_layers // self.num_sparse_encoder_layers
else:
a_ : Union[str, Any] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
a_ : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
a_ : str = self.num_decoder_layers # HACK: this will create 0 sparse layers
a_ : List[str] = num_heads
a_ : Any = num_experts
a_ : List[Any] = expert_capacity
a_ : Any = router_bias
a_ : str = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
a_ : Optional[int] = router_dtype
a_ : List[Any] = router_ignore_padding_tokens
a_ : Union[str, Any] = relative_attention_num_buckets
a_ : List[str] = relative_attention_max_distance
a_ : List[Any] = dropout_rate
a_ : Any = layer_norm_epsilon
a_ : Tuple = initializer_factor
a_ : Optional[int] = feed_forward_proj
a_ : Dict = use_cache
a_ : str = add_router_probs
a_ : Dict = router_z_loss_coef
a_ : Any = router_aux_loss_coef
a_ : Union[str, Any] = self.feed_forward_proj.split("""-""" )
a_ : str = act_info[-1]
a_ : Optional[Any] = act_info[0] == """gated"""
if len(UpperCamelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCamelCase_ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a_ : Optional[Any] = """gelu_new"""
super().__init__(
pad_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , **UpperCamelCase_ , )
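# A minimal sketch of the sparse-layer bookkeeping above: with 12 encoder
# layers and 3 sparse ones, every 4th layer is sparse (illustrative defaults,
# not a recommended configuration).
config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
print(config.encoder_sparse_step)  # 4
print(config.num_experts)          # 8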
| 419 | 0 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        """Sift the node at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key up towards the root."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree, starting from vertex 0."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
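# A small worked example (a sketch): four vertices, undirected weighted edges
# in the same [neighbor, weight] adjacency format the __main__ block below uses.
_example = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
    _example[_u].append([_v, _w])
    _example[_v].append([_u, _w])
print(prisms_algorithm(_example))  # expected: [(0, 1), (1, 2), (2, 3)]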
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowerCamelCase = int(input('Enter number of edges: ').strip())
__lowerCamelCase = defaultdict(list)
for _ in range(edges_number):
__lowerCamelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 328 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
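# The same deprecation-shim pattern in miniature (these names are illustrative,
# not part of transformers): the old name keeps working but warns on use.
class _NewProcessor:
    pass
class _OldExtractor(_NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("_OldExtractor is deprecated; use _NewProcessor.", FutureWarning)
        super().__init__(*args, **kwargs)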
| 328 | 1 |
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor just '#', keep it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the main domain, e.g. 'example.com'."""
    return ".".join(get_sub_domain_name(url).split('.')[-2:])
def get_sub_domain_name(url: str) -> str:
    """Return the full network location, e.g. 'sub.example.com'."""
    return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the links on the given page and return the e-mail addresses found."""
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(url)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open each linked page
            try:
                read = requests.get(link)
                # Get the valid emails.
                emails = re.findall('[a-zA-Z0-9]+@' + domain, read.text)
                # Add each address to the set (duplicates are ignored).
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
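# A quick offline check of the two helpers above (pure string handling,
# no network access):
print(get_sub_domain_name("https://docs.github.com/en"))  # docs.github.com
print(get_domain_name("https://docs.github.com/en"))      # github.com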
if __name__ == "__main__":
__magic_name__ : int = emails_from_url("""https://github.com""")
print(F"{len(emails)} emails found:")
print("""\n""".join(sorted(emails)))
| 615 |
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f'''Vertex\tShortest Distance from vertex {src}''')
    for i, d in enumerate(distance):
        print(f'''{i}\t\t{d}''')
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0
    # relax every edge vertex_count - 1 times
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
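# A tiny non-interactive check (same edge-dict format as the __main__ block
# below): 0->1 (4), 0->2 (1), 2->1 (2) gives distances [0.0, 3.0, 1.0] from 0.
_edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
assert bellman_ford(_edges, 3, 3, 0) == [0.0, 3.0, 1.0]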
if __name__ == "__main__":
import doctest
doctest.testmod()
a : List[str] = int(input("""Enter number of vertices: """).strip())
a : Any = int(input("""Enter number of edges: """).strip())
a : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print("""Edge """, i + 1)
a , a , a : Optional[int] = (
int(x)
for x in input("""Enter source, destination, weight: """).strip().split(""" """)
)
a : Optional[int] = {"""src""": src, """dst""": dest, """weight""": weight}
a : List[Any] = int(input("""\nEnter shortest path source:""").strip())
a : Tuple = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0) | 613 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
    import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """Remap deprecated `no_*` flags onto their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None
    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy
    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")
    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0
    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
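# A minimal sketch of the deprecated-flag remapping above (assumes the parent
# BenchmarkArguments dataclass can be constructed with defaults): passing
# `no_inference=True` pops the flag and re-adds it as `inference=False`,
# logging a deprecation warning along the way.
args = TensorFlowBenchmarkArguments(no_inference=True)
print(args.inference)  # False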
| 709 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Given two of the three concentrations (the missing one passed as 0),
    derive it from the mass action law: electron_conc * hole_conc == intrinsic_conc ** 2."""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
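# A quick sanity check (illustrative numbers, not measured data): with
# intrinsic_conc = 2 and hole_conc = 4, the mass action law gives
# electron_conc = 2**2 / 4 = 1.
assert carrier_concentration(electron_conc=0, hole_conc=4, intrinsic_conc=2) == (
    "electron_conc",
    1.0,
)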
if __name__ == "__main__":
import doctest
doctest.testmod()
| 437 | 0 |