code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester:
    """Builds a tiny DeBERTa config plus random inputs and checks the output
    shapes of each task head. Used by ``DebertaModelTest`` below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus random ids/masks/labels sized by this tester."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocab than the tiny default.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        # The loss must be a scalar (empty size).
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions; only the last output is shape-checked.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)

        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests plus per-head shape checks for DeBERTa."""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): the obfuscated source only shows one True followed by four
    # False flags; the flag names below follow the common-test convention —
    # confirm against the upstream test file.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released microsoft/deberta-base weights."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(
            torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}"
        )
| 93 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """CPU fast tests for the ONNX img2img pipeline against a tiny random checkpoint,
    one test per supported scheduler."""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic tiny pipeline inputs (seeded image + RNG)."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        # skip_prk_steps is required for PNDM to work with this pipeline.
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            self.hub_checkpoint, provider="CPUExecutionProvider"
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array(
            [0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests against the full released ONNX checkpoints."""

    @property
    def gpu_provider(self):
        # onnxruntime provider tuple: name plus provider options.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the obfuscated source only shows "<attr> = False";
        # enable_mem_pattern matches the usual diffusers setup — confirm upstream.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 93 | 1 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
__A = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    """Deprecated thin wrapper around `Trainer`, kept for backward compatibility.

    Emits a `FutureWarning` on construction and forwards everything to
    `Trainer.__init__`.
    """

    def __init__(self, args=None, **kwargs):
        # Bug fix: the obfuscated source passed the `args` parameter as the
        # warning category; the category must be a Warning subclass.
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 93 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the tokenizer-equivalence tests below.
# Bug fix: both constants were named `__A` (the first assignment was dead),
# while the test class references TOKENIZER_CHECKPOINTS by name.
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():

    class ModelToSave(tf.Module):
        """Wraps a TF tokenizer plus a tiny GPT-2 LM into a single `tf.Module`
        so the pair can be exported with `tf.saved_model.save`."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # NOTE(review): the obfuscated source passed the tokenizer object to
            # from_pretrained; the config must come from a checkpoint name.
            config = AutoConfig.from_pretrained("gpt2")
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            """Tokenize `text` and run the LM; returns the logits tensor."""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            # Mask out padding positions (id 0).
            # NOTE(review): source said `tf.intaa` (digit-mangled); int32 is the
            # usual attention-mask dtype — confirm upstream.
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    """Checks that the in-graph `TFGPTaTokenizer` matches the Python tokenizer
    and survives compilation, SavedModel round-trips, config round-trips and
    padding to a fixed length."""

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    # NOTE(review): source said `tf.intaa` (digit-mangled);
                    # int64 matches the TF tokenizer's output dtype — confirm.
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 93 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines.

    NOTE(review): class and field names reconstructed from the surrounding
    text-to-video pipeline imports — confirm against upstream diffusers.
    """

    # Denoised video frames, either as a list of numpy arrays or a torch tensor.
    frames: Union[List[np.ndarray], torch.FloatTensor]
# Import the real pipeline implementations only when both `transformers` and
# `torch` are installed; otherwise pull in dummy placeholder objects so that
# `from diffusers import ...` still succeeds and the missing-dependency error
# is raised lazily at use time.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 93 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build a `UperNetConfig` (ConvNeXt backbone) for the given checkpoint name.

    The backbone depths/hidden sizes and the auxiliary-head input channels are
    selected from the size tag embedded in `model_name` ("tiny", "small",
    "base", "large", "xlarge"). Labels are the 150 ADE20k classes, fetched
    from the hub. Called by the checkpoint-conversion entry point below.
    """
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping mmseg ConvNeXt/UperNet state-dict
    keys to the transformers naming scheme, driven by the backbone depths in
    `config.backbone_config`."""
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
    rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
    rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
    rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            # downsampling layer exists between every pair of adjacent stages
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))

        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = val
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
    """Convert an mmsegmentation UperNet+ConvNeXt checkpoint into the Hugging
    Face ``UperNetForSemanticSegmentation`` format, verify the logits against
    hard-coded expected values on an ADE20k fixture image, then optionally
    save the model/processor to disk and push them to the Hub.

    NOTE(review): the obfuscated signature repeats one parameter name three
    times (a SyntaxError in Python) — presumably ``(model_name,
    pytorch_dump_folder_path, push_to_hub)`` originally — and the body reads
    names (``model_name_to_url``, ``state_dict``, ``config``, ``model``,
    ``processor``, ``outputs``, ``expected_slice`` …) whose assignments were
    renamed to ``lowerCAmelCase__``; restore the names before running.
    """
    # Map from model name to the upstream mmsegmentation checkpoint URL.
    lowerCAmelCase__ :Dict = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    lowerCAmelCase__ :List[Any] = model_name_to_url[model_name]
    # Download the upstream checkpoint and keep only its 'state_dict' payload.
    lowerCAmelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' )['state_dict']
    lowerCAmelCase__ :List[Any] = get_upernet_config(_SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :Dict = UperNetForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        lowerCAmelCase__ :Optional[int] = state_dict.pop(_SCREAMING_SNAKE_CASE )
        if "bn" in key:
            lowerCAmelCase__ :Any = key.replace('bn' , 'batch_norm' )
        lowerCAmelCase__ :int = val
    # rename keys
    lowerCAmelCase__ :Optional[Any] = create_rename_keys(_SCREAMING_SNAKE_CASE )
    for src, dest in rename_keys:
        rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    model.load_state_dict(_SCREAMING_SNAKE_CASE )
    # verify on image
    lowerCAmelCase__ :str = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    lowerCAmelCase__ :Optional[Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
    lowerCAmelCase__ :Tuple = SegformerImageProcessor()
    lowerCAmelCase__ :List[Any] = processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
    with torch.no_grad():
        lowerCAmelCase__ :Optional[Any] = model(_SCREAMING_SNAKE_CASE )
    # Per-model expected 3x3 logit slices used as a conversion sanity check.
    if model_name == "upernet-convnext-tiny":
        lowerCAmelCase__ :Optional[Any] = torch.tensor(
            [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
    elif model_name == "upernet-convnext-small":
        lowerCAmelCase__ :Union[str, Any] = torch.tensor(
            [[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
    elif model_name == "upernet-convnext-base":
        lowerCAmelCase__ :Dict = torch.tensor(
            [[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
    elif model_name == "upernet-convnext-large":
        lowerCAmelCase__ :List[str] = torch.tensor(
            [[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
    elif model_name == "upernet-convnext-xlarge":
        lowerCAmelCase__ :Optional[Any] = torch.tensor(
            [[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
    print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_SCREAMING_SNAKE_CASE )
        print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(_SCREAMING_SNAKE_CASE )
    if push_to_hub:
        print(F"Pushing model and processor for {model_name} to hub" )
        model.push_to_hub(F"openmmlab/{model_name}" )
        processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
    # Script entry point: parse CLI flags and run the checkpoint conversion.
    # NOTE(review): ``parser``/``args`` and ``convert_upernet_checkpoint`` are
    # never bound under the obfuscated naming (both assignments target ``__A``
    # and the conversion function above is ``__A``) — restore the original
    # names before running this script.
    __A = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""upernet-convnext-tiny""",
        type=str,
        choices=[F'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
        help="""Name of the ConvNext UperNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    __A = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 93 | 1 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__A = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
__A = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
__A = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
return float((preds == labels).mean() )
def __A (preds , labels ) ->dict:
    """Accuracy and F1 of *preds* against *labels*.

    Fixes: the obfuscated signature repeated one parameter name twice
    (a SyntaxError), the body referenced the unbound names ``acc``/``fa``,
    and the ``->int`` annotation was wrong for a dict return. ``fa_score`` is
    the module's (garbled) ``f1_score`` import; ``simple_accuracy`` is the
    accuracy helper defined just above — confirm both names resolve under the
    obfuscated module naming.
    """
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :int = np.array(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = np.array(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = en_sentvecs.shape[0]
# mean centering
lowerCAmelCase__ :List[Any] = en_sentvecs - np.mean(_SCREAMING_SNAKE_CASE , axis=0 )
lowerCAmelCase__ :Union[str, Any] = in_sentvecs - np.mean(_SCREAMING_SNAKE_CASE , axis=0 )
lowerCAmelCase__ :Tuple = cdist(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'cosine' )
lowerCAmelCase__ :str = np.array(range(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ :int = sim.argsort(axis=1 )[:, :10]
lowerCAmelCase__ :Optional[Any] = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
    """IndicGLUE metric: per-subset accuracy, accuracy+F1 (wiki-ner), or
    precision@10 (cvit-mkb-clsr).

    NOTE(review): ``_DESCRIPTION``/``_CITATION``/``_KWARGS_DESCRIPTION`` and
    the helpers ``precision_at_aa``/``acc_and_fa``/``simple_accuracy`` are
    unbound under the obfuscated naming (the module constants and helper
    functions above are all named ``__A``), and both methods below share the
    name ``snake_case`` so the second definition clobbers the first — restore
    the original names (``_info``/``_compute``) before use.
    """
    def snake_case ( self ):
        '''Declare the metric's input features — int64 labels, or float32
        vectors for the cvit-mkb-clsr retrieval subset — and reject unknown
        configuration names with a KeyError.'''
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]' )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' )
                    if self.config_name != 'cvit-mkb-clsr'
                    else datasets.Sequence(datasets.Value('float32' ) ),
                    'references': datasets.Value('int64' )
                    if self.config_name != 'cvit-mkb-clsr'
                    else datasets.Sequence(datasets.Value('float32' ) ),
                } ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''Route to the subset-appropriate score: precision@10 for
        cvit-mkb-clsr, accuracy+F1 for wiki-ner, plain accuracy for the
        remaining subsets; raise KeyError for anything else.'''
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(__UpperCAmelCase , __UpperCAmelCase )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(__UpperCAmelCase , __UpperCAmelCase )
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]' )
| 93 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# NOTE(review): every constant below rebinds the same name ``__A`` (obfuscation
# artifact), so only the final assignment survives at runtime. The tokenizer
# class below references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (and ``logger``), presumably these
# constants' original names — restore them before use.
__A = logging.get_logger(__name__)
# Filenames of the slow/fast tokenizer artifacts.
__A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# Hub URLs for each pretrained RoBERTa checkpoint's tokenizer files.
__A = {
    """vocab_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
        """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
        ),
    },
    """merges_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
        """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
        ),
    },
    """tokenizer_file""": {
        """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
        """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
        """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
        """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
        """roberta-base-openai-detector""": (
            """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
        ),
        """roberta-large-openai-detector""": (
            """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length (in tokens) per pretrained checkpoint.
__A = {
    """roberta-base""": 512,
    """roberta-large""": 512,
    """roberta-large-mnli""": 512,
    """distilroberta-base""": 512,
    """roberta-base-openai-detector""": 512,
    """roberta-large-openai-detector""": 512,
}
class _lowerCAmelCase ( a ):
    """Fast (tokenizers-backed) RoBERTa tokenizer.

    NOTE(review): the obfuscation renamed every class attribute to
    ``__magic_name__`` and every method to ``snake_case``, so later
    definitions clobber earlier ones, ``@mask_token.setter`` refers to an
    undefined name, and many assignment targets (``pre_tok_state``,
    ``state``, ``changes_to_apply``, ``is_split_into_words`` …) were renamed
    to ``lowerCAmelCase__`` and are unbound where later read — restore the
    original names before relying on this class at runtime.
    """
    __magic_name__ :str = VOCAB_FILES_NAMES
    __magic_name__ :List[Any] = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ :str = ["""input_ids""", """attention_mask"""]
    __magic_name__ :Any = RobertaTokenizer
    def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , __UpperCAmelCase=True , **__UpperCAmelCase , ):
        '''Initialize the fast tokenizer, then synchronize ``add_prefix_space``
        and ``trim_offsets`` into the backend's pre-tokenizer and
        post-processor state so they match the constructor arguments.'''
        super().__init__(
            __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , )
        # Rebuild the pre-tokenizer if its serialized add_prefix_space differs.
        lowerCAmelCase__ :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
            lowerCAmelCase__ :Optional[int] = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
            lowerCAmelCase__ :List[Any] = add_prefix_space
            lowerCAmelCase__ :str = pre_tok_class(**__UpperCAmelCase )
        lowerCAmelCase__ :List[str] = add_prefix_space
        # Do the same for the post-processor component, if one exists.
        lowerCAmelCase__ :str = 'post_processor'
        lowerCAmelCase__ :Optional[Any] = getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
        if tokenizer_component_instance:
            lowerCAmelCase__ :Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                lowerCAmelCase__ :Any = tuple(state['sep'] )
            if "cls" in state:
                lowerCAmelCase__ :int = tuple(state['cls'] )
            lowerCAmelCase__ :List[Any] = False
            if state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
                lowerCAmelCase__ :Union[str, Any] = add_prefix_space
                lowerCAmelCase__ :Any = True
            if state.get('trim_offsets' , __UpperCAmelCase ) != trim_offsets:
                lowerCAmelCase__ :Union[str, Any] = trim_offsets
                lowerCAmelCase__ :Optional[int] = True
            if changes_to_apply:
                lowerCAmelCase__ :str = getattr(__UpperCAmelCase , state.pop('type' ) )
                lowerCAmelCase__ :Any = component_class(**__UpperCAmelCase )
                setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
    @property
    def snake_case ( self ):
        '''Mask token as a string; logs an error and returns None if unset.'''
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def snake_case ( self , __UpperCAmelCase ):
        '''Setter counterpart of the mask-token property: stores the value,
        apparently wrapping plain strings in an ``AddedToken`` — the
        obfuscated argument names make the lstrip/rstrip flags unbound, so
        confirm against the original before use.'''
        lowerCAmelCase__ :List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else value
        lowerCAmelCase__ :List[str] = value
    def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''Batch encode: refuse pre-tokenized input unless add_prefix_space is
        set, then delegate to the parent implementation.'''
        lowerCAmelCase__ :Optional[Any] = kwargs.get('is_split_into_words' , __UpperCAmelCase )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
    def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''Single-example encode: same pre-tokenized-input guard as above,
        then delegate to the parent implementation.'''
        lowerCAmelCase__ :Any = kwargs.get('is_split_into_words' , __UpperCAmelCase )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
        '''Save the backing tokenizer model's vocabulary files and return the
        written file paths as a tuple.'''
        lowerCAmelCase__ :Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
        return tuple(__UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
        '''Build model inputs with special tokens:
        ``<s> A </s>`` or ``<s> A </s></s> B </s>`` for pairs.'''
        lowerCAmelCase__ :str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
        '''Return an all-zero token-type-id mask covering the sequence(s) plus
        special tokens (RoBERTa does not use token type ids).'''
        lowerCAmelCase__ :List[Any] = [self.sep_token_id]
        lowerCAmelCase__ :int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 93 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__A = """Create a default config file for Accelerate with only a few flags set."""
def __A (_SCREAMING_SNAKE_CASE="no" , _SCREAMING_SNAKE_CASE = default_json_config_file , _SCREAMING_SNAKE_CASE = False ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :int = Path(_SCREAMING_SNAKE_CASE )
path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
if path.exists():
print(
F"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
lowerCAmelCase__ :Tuple = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
lowerCAmelCase__ :Union[str, Any] = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
lowerCAmelCase__ :str = torch.cuda.device_count()
lowerCAmelCase__ :Any = num_gpus
lowerCAmelCase__ :Tuple = False
if num_gpus > 1:
lowerCAmelCase__ :int = 'MULTI_GPU'
else:
lowerCAmelCase__ :int = 'NO'
elif is_xpu_available() and use_xpu:
lowerCAmelCase__ :Optional[Any] = torch.xpu.device_count()
lowerCAmelCase__ :Tuple = num_xpus
lowerCAmelCase__ :List[str] = False
if num_xpus > 1:
lowerCAmelCase__ :Any = 'MULTI_XPU'
else:
lowerCAmelCase__ :List[str] = 'NO'
elif is_npu_available():
lowerCAmelCase__ :Optional[int] = torch.npu.device_count()
lowerCAmelCase__ :Union[str, Any] = num_npus
lowerCAmelCase__ :Optional[Any] = False
if num_npus > 1:
lowerCAmelCase__ :Dict = 'MULTI_NPU'
else:
lowerCAmelCase__ :int = 'NO'
else:
lowerCAmelCase__ :List[Any] = 0
lowerCAmelCase__ :Union[str, Any] = True
lowerCAmelCase__ :str = 1
lowerCAmelCase__ :Optional[Any] = 'NO'
lowerCAmelCase__ :Optional[int] = ClusterConfig(**_SCREAMING_SNAKE_CASE )
config.to_json_file(_SCREAMING_SNAKE_CASE )
return path
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Dict = parser.add_parser('default' , parents=_SCREAMING_SNAKE_CASE , help=_SCREAMING_SNAKE_CASE , formatter_class=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'--config_file' , default=_SCREAMING_SNAKE_CASE , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_SCREAMING_SNAKE_CASE , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Any = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"accelerate configuration saved at {config_file}" )
| 93 |
"""simple docstring"""
from __future__ import annotations
import math
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
# All odd composite numbers below 100001 (odd numbers >= 3 that fail the
# primality test). NOTE(review): ``is_prime`` resolves to whatever ``__A``
# last bound under the obfuscated naming — presumably the primality test
# defined just above (originally ``is_prime``); restore the name before
# evaluating this module.
__A = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def __A (_SCREAMING_SNAKE_CASE ) ->list[int]:
    """Return the first *n* odd composite numbers that can NOT be written as
    the sum of a prime and twice a square (Project Euler 46 /
    "Goldbach's other conjecture"), or [] if none are found in range.

    Raises ValueError for non-int or non-positive *n* (kept as in the
    original, which used ValueError rather than TypeError for the type check).

    Fixes: the body referenced the unbound names ``n``/``list_nums``/``i``
    (obfuscation artifacts), called ``is_prime`` on the parameter instead of
    the remainder, and took ``len()`` of the parameter instead of the result
    list. ``odd_composites`` and ``is_prime`` are expected to be the module
    constant and primality test defined above — confirm the names resolve
    under the obfuscated naming.
    """
    n = _SCREAMING_SNAKE_CASE
    if not isinstance(n , int ):
        raise ValueError('n must be an integer' )
    if n <= 0:
        raise ValueError('n must be >= 0' )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        # Try every decomposition odd_composite = prime + 2*i*i.
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            # No decomposition found: this number violates the conjecture.
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def __A () ->int:
    """Project Euler 46 answer: the smallest odd composite that cannot be
    written as the sum of a prime and twice a square.

    NOTE(review): ``compute_nums`` is presumably the search routine defined
    just above (renamed to ``__A`` by obfuscation) — confirm the reference
    resolves before running.
    """
    return compute_nums(1 )[0]
# NOTE(review): ``solution`` is likewise undefined under the obfuscated naming
# (the function above is ``__A``) — restore before executing as a script.
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 93 | 1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowerCAmelCase ( a ):
    """Repository-hygiene checks for dataset scripts under ``./datasets``:
    every ``open(...)`` must pass an explicit utf-8 encoding (or binary mode)
    and no script may use ``print(...)``.

    NOTE(review): all four methods were renamed to ``snake_case`` by the
    obfuscation, so later definitions clobber earlier ones and the test
    methods' calls to ``self._no_encoding_on_file_open`` /
    ``self._no_print_statements`` do not resolve — restore the original
    method names before running.
    """
    def snake_case ( self , __UpperCAmelCase ):
        '''Return a regex match if the given file calls ``open(...)`` without
        an explicit encoding and without a binary/write mode, else None.'''
        with open(__UpperCAmelCase , encoding='utf-8' ) as input_file:
            lowerCAmelCase__ :Optional[Any] = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
            lowerCAmelCase__ :List[str] = input_file.read()
            lowerCAmelCase__ :str = regexp.search(__UpperCAmelCase )
        return match
    def snake_case ( self , __UpperCAmelCase ):
        '''Return the first real ``print(...)`` call in the file, ignoring
        prints that appear inside comments, strings or docstrings; None if
        there is none.'''
        with open(__UpperCAmelCase , encoding='utf-8' ) as input_file:
            lowerCAmelCase__ :Optional[int] = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
            lowerCAmelCase__ :int = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            lowerCAmelCase__ :Optional[Any] = regexp.finditer(__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None
    def snake_case ( self ):
        '''Fail if any dataset script opens a file without utf-8 encoding.'''
        lowerCAmelCase__ :int = Path('./datasets' )
        lowerCAmelCase__ :Union[str, Any] = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__UpperCAmelCase ) ):
                raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )
    def snake_case ( self ):
        '''Fail if any dataset script contains a ``print`` statement.'''
        lowerCAmelCase__ :Any = Path('./datasets' )
        lowerCAmelCase__ :str = list(dataset_paths.absolute().glob('**/*.py' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__UpperCAmelCase ) ):
                raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 93 |
"""simple docstring"""
import re
def __A (_SCREAMING_SNAKE_CASE ) ->list:
"""simple docstring"""
return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )]
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """PascalCase conversion: split the input on non-alphanumerics via
    ``split_input``, capitalize every word, and concatenate them with no
    separator.

    Fix: the body referenced the unbound names ``str_``/``string_split``;
    they are now derived from the parameter. ``split_input`` is expected to
    be the splitter defined earlier in this module — confirm the name
    resolves under the obfuscated naming.
    """
    str_ = _SCREAMING_SNAKE_CASE
    string_split = split_input(str_ )
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def __A (str_ , upper , separator ) ->str:
    """Join the words of *str_* with *separator*, upper-casing them when
    *upper* is truthy and lower-casing them otherwise (the shared engine
    behind the snake_case / kebab-case wrappers below). Returns
    ``'not valid string'`` when indexing fails on malformed input.

    Fixes: the obfuscated signature repeated one parameter name three times
    (a SyntaxError) and the body referenced unbound names; parameters are
    restored as ``(str_, upper, separator)`` to match the wrappers' calls.
    ``split_input`` is expected to be the splitter defined earlier in this
    module — confirm the name resolves under the obfuscated naming.
    """
    try:
        string_split = split_input(str_ )
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Public PascalCase entry point: simply delegates to ``to_simple_case``
    with the argument unchanged."""
    pascal_cased = to_simple_case(_SCREAMING_SNAKE_CASE )
    return pascal_cased
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """camelCase conversion: build PascalCase via ``to_simple_case`` and
    lower-case its first character; returns ``'not valid string'`` when the
    result is empty (IndexError on ``res_str[0]``).

    Fix: the body referenced the unbound name ``res_str`` (obfuscation
    artifact); it is now bound. ``to_simple_case`` is expected to be the
    PascalCase helper defined earlier in this module — confirm the name
    resolves under the obfuscated naming.
    """
    try:
        res_str = to_simple_case(_SCREAMING_SNAKE_CASE )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def __A (str_ , upper ) ->str:
    """snake_case (or SNAKE_CASE when *upper* is truthy) conversion:
    delegates to ``to_complex_case`` with an underscore separator.

    Fix: the obfuscated signature repeated the parameter name twice
    (a SyntaxError); restored as ``(str_, upper)``. ``to_complex_case`` is
    expected to be the joiner defined earlier in this module — confirm the
    name resolves under the obfuscated naming.
    """
    return to_complex_case(str_ , upper , '_' )
def __A (str_ , upper ) ->str:
    """kebab-case (or KEBAB-CASE when *upper* is truthy) conversion:
    delegates to ``to_complex_case`` with a hyphen separator.

    Fix: the obfuscated signature repeated the parameter name twice
    (a SyntaxError); restored as ``(str_, upper)``. ``to_complex_case`` is
    expected to be the joiner defined earlier in this module — confirm the
    name resolves under the obfuscated naming.
    """
    return to_complex_case(str_ , upper , '-' )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    __import__("""doctest""").testmod()
| 93 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = params
lowerCAmelCase__ :str = np.array(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = np.array([len(__UpperCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , __UpperCAmelCase ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
def snake_case ( self ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.params.max_model_input_size
lowerCAmelCase__ :Tuple = self.lengths > max_len
logger.info(F"Splitting {sum(__UpperCAmelCase )} too long sequences." )
def divide_chunks(__UpperCAmelCase , __UpperCAmelCase ):
return [l[i : i + n] for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )]
lowerCAmelCase__ :str = []
lowerCAmelCase__ :Union[str, Any] = []
if self.params.mlm:
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
lowerCAmelCase__ , lowerCAmelCase__ :Dict = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowerCAmelCase__ :Optional[int] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowerCAmelCase__ :Optional[int] = np.insert(__UpperCAmelCase , 0 , __UpperCAmelCase )
if sub_s[-1] != sep_id:
lowerCAmelCase__ :Optional[int] = np.insert(__UpperCAmelCase , len(__UpperCAmelCase ) , __UpperCAmelCase )
assert len(__UpperCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(__UpperCAmelCase )
new_tok_ids.extend(__UpperCAmelCase )
new_lengths.extend([len(__UpperCAmelCase ) for l in sub_seqs] )
lowerCAmelCase__ :Union[str, Any] = np.array(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = np.array(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = len(self )
lowerCAmelCase__ :List[Any] = self.lengths > 1_1
lowerCAmelCase__ :str = self.token_ids[indices]
lowerCAmelCase__ :List[str] = self.lengths[indices]
lowerCAmelCase__ :int = len(self )
logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def snake_case ( self ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowerCAmelCase__ :Optional[int] = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ :Optional[Any] = len(self )
lowerCAmelCase__ :Optional[Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowerCAmelCase__ :List[Any] = (unk_occs / self.lengths) < 0.5
lowerCAmelCase__ :Union[str, Any] = self.token_ids[indices]
lowerCAmelCase__ :str = self.lengths[indices]
lowerCAmelCase__ :Union[str, Any] = len(self )
logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def snake_case ( self ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = [t[0] for t in batch]
lowerCAmelCase__ :Optional[Any] = [t[1] for t in batch]
assert len(__UpperCAmelCase ) == len(__UpperCAmelCase )
# Max for paddings
lowerCAmelCase__ :str = max(__UpperCAmelCase )
# Pad token ids
if self.params.mlm:
lowerCAmelCase__ :Tuple = self.params.special_tok_ids['pad_token']
else:
lowerCAmelCase__ :str = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ :Optional[int] = [list(t.astype(__UpperCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(__UpperCAmelCase )
assert all(len(__UpperCAmelCase ) == max_seq_len_ for t in tk_ )
lowerCAmelCase__ :List[Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowerCAmelCase__ :List[str] = torch.tensor(__UpperCAmelCase ) # (bs)
return tk_t, lg_t
| 93 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
# NOTE(review): all four credential placeholders bind the same name ``__A``
# (obfuscation artifact), so only the last assignment survives; presumably
# these were consumer_key / consumer_secret / access_key / access_secret,
# which the download function below reads — restore the names and fill in
# real credentials before use.
__A = """"""
__A = """"""
__A = """"""
__A = """"""
def __A (_SCREAMING_SNAKE_CASE ) ->None:
    """Download a user's full tweet timeline and write it to a CSV file.

    Pages backwards through the timeline 200 tweets at a time using
    ``max_id`` and writes ``id``, ``created_at`` and ``text`` columns to
    ``new_<screen_name>_tweets.csv``.

    NOTE(review): this body is obfuscation-broken — results are assigned to
    ``lowerCAmelCase__`` but read as ``auth``, ``api``, ``alltweets``,
    ``oldest``, ``outtweets`` and ``writer`` (NameError), the OAuth call is
    handed the screen name as both credentials, and the f-string reads
    ``screen_name`` which is never bound. The original names need restoring.
    """
    lowerCAmelCase__ :Any = tweepy.OAuthHandler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    auth.set_access_token(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :List[Any] = tweepy.API(_SCREAMING_SNAKE_CASE )
    # initialize a list to hold all the tweepy Tweets
    lowerCAmelCase__ :Union[str, Any] = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    lowerCAmelCase__ :Optional[Any] = api.user_timeline(screen_name=_SCREAMING_SNAKE_CASE , count=200 )
    # save most recent tweets
    alltweets.extend(_SCREAMING_SNAKE_CASE )
    # save the id of the oldest tweet less one
    lowerCAmelCase__ :Tuple = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(_SCREAMING_SNAKE_CASE ) > 0:
        print(F"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        lowerCAmelCase__ :Union[str, Any] = api.user_timeline(
            screen_name=_SCREAMING_SNAKE_CASE , count=200 , max_id=_SCREAMING_SNAKE_CASE )
        # save most recent tweets
        alltweets.extend(_SCREAMING_SNAKE_CASE )
        # update the id of the oldest tweet less one
        lowerCAmelCase__ :Tuple = alltweets[-1].id - 1
        print(F"...{len(_SCREAMING_SNAKE_CASE )} tweets downloaded so far" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    lowerCAmelCase__ :Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
        lowerCAmelCase__ :List[str] = csv.writer(_SCREAMING_SNAKE_CASE )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # pass in the username of the account you want to download.
    # Fix: the downloader defined in this file is named ``__A``;
    # ``get_all_tweets`` does not exist, so calling it raised NameError.
    __A("""FirePing32""")
| 93 | 1 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class _lowerCAmelCase ( a ):
    """Mel-spectrogram feature extractor for audio inputs.

    Converts raw mono waveforms into log-mel spectrogram patches
    (``audio_values``) plus an optional per-patch attention mask
    (``audio_mask``), padded to the longest example in the batch.

    NOTE(review): throughout this class results are assigned to the obfuscated
    name ``lowerCAmelCase__`` but later read under their intended names
    (``hop_length_to_sampling_rate``, ``log_spec``, ``raw_speech``,
    ``audio_features``, ``max_patch_len``, ``max_time_len``,
    ``padded_audio_features``, ``audio_mask``, ``encoded_inputs``) — those
    reads raise NameError at runtime and need the original names restored.
    """
    __magic_name__ :Any = ["""audio_values""", """audio_mask"""]
    # NOTE(review): the list default below ([1_6, 1_6]) is a mutable default
    # argument — shared across calls; should be a tuple or a None sentinel.
    def __init__( self , __UpperCAmelCase=2_0_4_8 , __UpperCAmelCase=1 , __UpperCAmelCase=[1_6, 1_6] , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=4_4_1_0_0 , __UpperCAmelCase=8_6 , __UpperCAmelCase=2_0_4_8 , __UpperCAmelCase=0.0 , **__UpperCAmelCase , ):
        """Configure spectrogram geometry and precompute the mel filter bank."""
        super().__init__(
            feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase , )
        lowerCAmelCase__ :Optional[Any] = spectrogram_length
        lowerCAmelCase__ :Tuple = num_channels
        lowerCAmelCase__ :Optional[Any] = patch_size
        # number of patches along the frequency axis
        lowerCAmelCase__ :Tuple = feature_size // self.patch_size[1]
        lowerCAmelCase__ :Optional[int] = n_fft
        lowerCAmelCase__ :Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
        lowerCAmelCase__ :List[Any] = sampling_rate
        lowerCAmelCase__ :str = padding_value
        # Slaney-normalized mel filter bank, transposed for the spectrogram call.
        lowerCAmelCase__ :Tuple = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__UpperCAmelCase , norm='slaney' , mel_scale='slaney' , ).T
    def snake_case ( self , __UpperCAmelCase ):
        """Compute a dB-scale log-mel spectrogram for one waveform and rescale it to [-1, 1]."""
        lowerCAmelCase__ :str = spectrogram(
            __UpperCAmelCase , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
        lowerCAmelCase__ :List[str] = log_spec[:, :-1]
        lowerCAmelCase__ :Union[str, Any] = log_spec - 20.0
        lowerCAmelCase__ :Optional[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , **__UpperCAmelCase , ):
        """Featurize one waveform or a batch of waveforms into a padded BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    F" with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        lowerCAmelCase__ :Optional[Any] = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        lowerCAmelCase__ :Tuple = is_batched_numpy or (
            isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase__ :Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
            lowerCAmelCase__ :Optional[Any] = np.asarray(__UpperCAmelCase , dtype=np.floataa )
        elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase__ :Tuple = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase__ :Tuple = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        lowerCAmelCase__ :Dict = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , __UpperCAmelCase ):
            lowerCAmelCase__ :Dict = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        lowerCAmelCase__ :Optional[int] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding patches.
            lowerCAmelCase__ :Dict = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            lowerCAmelCase__ :Any = np.array(__UpperCAmelCase ).astype(np.floataa )
        # convert into correct format for padding
        lowerCAmelCase__ :Optional[Any] = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        lowerCAmelCase__ :Dict = np.ones([len(__UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        lowerCAmelCase__ :Dict = padded_audio_features * self.padding_value
        for i in range(len(__UpperCAmelCase ) ):
            lowerCAmelCase__ :Union[str, Any] = audio_features[i]
            lowerCAmelCase__ :List[str] = feature
        # return as BatchFeature
        if return_attention_mask:
            lowerCAmelCase__ :Union[str, Any] = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            lowerCAmelCase__ :Tuple = {'audio_values': padded_audio_features}
        lowerCAmelCase__ :Optional[Any] = BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
        return encoded_inputs
| 93 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowerCAmelCase ( a , a , a , unittest.TestCase ):
    """Fast (tiny-model, CPU-sized) pipeline tests for ``StableUnCLIPPipeline``.

    NOTE(review): several values are read under names that are never bound
    (e.g. ``embedder_hidden_size``, ``embedder_projection_dim``) because the
    assignments above them target the obfuscated name ``lowerCAmelCase__`` —
    these raise NameError at runtime and need the original names restored.
    """
    __magic_name__ :int = StableUnCLIPPipeline
    __magic_name__ :int = TEXT_TO_IMAGE_PARAMS
    __magic_name__ :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
    __magic_name__ :Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    __magic_name__ :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    __magic_name__ :List[str] = False
    def snake_case ( self ):
        """Build the full set of tiny prior + denoising components for the pipeline."""
        lowerCAmelCase__ :Dict = 3_2
        lowerCAmelCase__ :List[Any] = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        lowerCAmelCase__ :Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Any = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=__UpperCAmelCase , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
        torch.manual_seed(0 )
        lowerCAmelCase__ :int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=__UpperCAmelCase , num_layers=1 , )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Optional[int] = DDPMScheduler(
            variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=__UpperCAmelCase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCAmelCase__ :Dict = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
        torch.manual_seed(0 )
        lowerCAmelCase__ :List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        lowerCAmelCase__ :str = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Optional[int] = UNetaDConditionModel(
            sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__UpperCAmelCase , layers_per_block=1 , upcast_attention=__UpperCAmelCase , use_linear_projection=__UpperCAmelCase , )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Tuple = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Optional[int] = AutoencoderKL()
        lowerCAmelCase__ :Optional[int] = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
        """Build deterministic dummy pipeline inputs for the given device/seed."""
        if str(__UpperCAmelCase ).startswith('mps' ):
            # mps does not support device-bound generators
            lowerCAmelCase__ :str = torch.manual_seed(__UpperCAmelCase )
        else:
            lowerCAmelCase__ :Optional[int] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def snake_case ( self ):
        """Attention-slicing output should match the unsliced forward pass (exact on CPU)."""
        lowerCAmelCase__ :Optional[int] = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase )
    def snake_case ( self ):
        """Batched and single-sample inference should produce identical results."""
        lowerCAmelCase__ :str = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """GPU integration tests for ``StableUnCLIPPipeline`` against real checkpoints.

    NOTE(review): obfuscated assignments to ``lowerCAmelCase__`` shadow the
    names later read (``pipe``, ``image``, ``mem_bytes``, ...) — NameError at
    runtime; original names need restoring.
    """
    def snake_case ( self ):
        """Free GPU memory between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self ):
        """Full fp16 text-to-image run; compare against a stored reference image."""
        lowerCAmelCase__ :Tuple = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
        lowerCAmelCase__ :List[Any] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCAmelCase__ :List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
        lowerCAmelCase__ :List[str] = pipe('anime turle' , generator=__UpperCAmelCase , output_type='np' )
        lowerCAmelCase__ :Union[str, Any] = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
    def snake_case ( self ):
        """With offload + attention slicing, peak GPU memory must stay under 7 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCAmelCase__ :int = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
        lowerCAmelCase__ :List[str] = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCAmelCase__ :Tuple = pipe(
            'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
        lowerCAmelCase__ :str = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 1_0**9
| 93 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( a , unittest.TestCase ):
    """Fast (tiny-model) pipeline tests for ``DiTPipeline``.

    NOTE(review): several reads (``attention_bias=__UpperCAmelCase`` inside a
    no-argument method, ``transformer``, ``pipe``, ``image`` etc.) reference
    names never bound because assignments target the obfuscated
    ``lowerCAmelCase__`` — NameError at runtime; needs repair.
    """
    __magic_name__ :Optional[Any] = DiTPipeline
    __magic_name__ :Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    # optional call parameters this pipeline does not accept
    __magic_name__ :Dict = PipelineTesterMixin.required_optional_params - {
        """latents""",
        """num_images_per_prompt""",
        """callback""",
        """callback_steps""",
    }
    __magic_name__ :List[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    __magic_name__ :Dict = False
    def snake_case ( self ):
        """Build a tiny transformer + VAE + scheduler component set."""
        torch.manual_seed(0 )
        lowerCAmelCase__ :str = TransformeraDModel(
            sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__UpperCAmelCase , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=__UpperCAmelCase , )
        lowerCAmelCase__ :int = AutoencoderKL()
        lowerCAmelCase__ :Union[str, Any] = DDIMScheduler()
        lowerCAmelCase__ :List[Any] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
        """Build deterministic dummy pipeline inputs for the given device/seed."""
        if str(__UpperCAmelCase ).startswith('mps' ):
            # mps does not support device-bound generators
            lowerCAmelCase__ :int = torch.manual_seed(__UpperCAmelCase )
        else:
            lowerCAmelCase__ :List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def snake_case ( self ):
        """Compare a 2-step CPU inference result against a stored slice of expected pixels."""
        lowerCAmelCase__ :Union[str, Any] = 'cpu'
        lowerCAmelCase__ :str = self.get_dummy_components()
        lowerCAmelCase__ :Any = self.pipeline_class(**__UpperCAmelCase )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = self.get_dummy_inputs(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Optional[int] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
        lowerCAmelCase__ :List[str] = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
        lowerCAmelCase__ :Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__UpperCAmelCase , 1E-3 )
    def snake_case ( self ):
        """Batched and single-sample inference should match within tolerance."""
        self._test_inference_batch_single_identical(relax_max_difference=__UpperCAmelCase , expected_max_diff=1E-3 )
    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def snake_case ( self ):
        """xFormers attention should match the default attention within tolerance."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
    """GPU integration tests for ``DiTPipeline`` against the real DiT-XL checkpoints.

    NOTE(review): obfuscated ``lowerCAmelCase__`` assignments shadow the names
    later read (``pipe``, ``words``, ``images``, ``expected_image``) —
    NameError at runtime; original names need restoring.
    """
    def snake_case ( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self ):
        """DiT-XL-2-256 with DDIM: each generated class image must match its stored reference."""
        lowerCAmelCase__ :Any = torch.manual_seed(0 )
        lowerCAmelCase__ :Optional[int] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
        pipe.to('cuda' )
        lowerCAmelCase__ :Union[str, Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
        lowerCAmelCase__ :Tuple = pipe.get_label_ids(__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=4_0 , output_type='np' ).images
        for word, image in zip(__UpperCAmelCase , __UpperCAmelCase ):
            lowerCAmelCase__ :Optional[Any] = load_numpy(
                F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
            assert np.abs((expected_image - image).max() ) < 1E-2
    def snake_case ( self ):
        """DiT-XL-2-512 with DPMSolver: looser tolerance against 512px references."""
        lowerCAmelCase__ :int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
        lowerCAmelCase__ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('cuda' )
        lowerCAmelCase__ :Optional[int] = ['vase', 'umbrella']
        lowerCAmelCase__ :Optional[Any] = pipe.get_label_ids(__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = torch.manual_seed(0 )
        lowerCAmelCase__ :int = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2_5 , output_type='np' ).images
        for word, image in zip(__UpperCAmelCase , __UpperCAmelCase ):
            lowerCAmelCase__ :Optional[int] = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                F"/dit/{word}_512.npy" )
            assert np.abs((expected_image - image).max() ) < 1E-1
| 93 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
__A = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowerCAmelCase :
    """Arguments that select which model, config and tokenizer to fine-tune or train.

    NOTE(review): all fields share the obfuscated name ``__magic_name__`` —
    each declaration overrides the previous one; presumably these were
    ``model_name_or_path``, ``model_type``, ``config_name``,
    ``tokenizer_name`` and ``cache_dir``. TODO: restore distinct field names.
    """
    __magic_name__ :Optional[str] = field(
        default=a , metadata={
            """help""": (
                """The model checkpoint for weights initialization. Leave None if you want to train a model from"""
                """ scratch."""
            )
        } , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a )} , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _lowerCAmelCase :
    """Arguments describing the training/eval data files and masking strategy.

    NOTE(review): all fields share the obfuscated name ``__magic_name__`` —
    each declaration overrides the previous one; the ``help`` strings indicate
    the intended names (train/eval data files, ref files, line_by_line, mlm,
    whole_word_mask, mlm_probability, plm_probability, max_span_length,
    block_size, overwrite_cache). TODO: restore distinct field names.
    """
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """The input training data file (a text file)."""} )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
            )
        } , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Whether ot not to use whole word mask."""} )
    __magic_name__ :float = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    __magic_name__ :float = field(
        default=1 / 6 , metadata={
            """help""": (
                """Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
                """ modeling."""
            )
        } , )
    __magic_name__ :int = field(
        default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
    __magic_name__ :int = field(
        default=-1 , metadata={
            """help""": (
                """Optional input sequence length after tokenization."""
                """The training dataset will be truncated in block of this size for training."""
                """Default to the model max input length for single sentence inputs (take into account special tokens)."""
            )
        } , )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __A (args , tokenizer , evaluate = False , cache_dir = None , ) ->Optional[int]:
    """Build the train or eval dataset described by ``args``.

    Fixes vs. previous revision: the signature declared the same parameter
    name four times (a SyntaxError in Python), and the glob branch passed the
    wrong value to ``_dataset``. The call sites in ``main`` use the keyword
    names ``tokenizer``/``evaluate``/``cache_dir``, which pins the restored
    parameter names.

    Args:
        args: the parsed ``DataTrainingArguments``.
        tokenizer: tokenizer used by the dataset classes.
        evaluate: when True, build the eval dataset instead of train.
        cache_dir: optional cache directory forwarded to ``TextDataset``.
    """
    def _dataset(file_path , ref_path=None ):
        # One sample per input line when --line_by_line is set.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        # Fix: each globbed file must be passed to _dataset (was passing the
        # outer argument instead of ``f``).
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )


# Callers in this file refer to the function as ``get_dataset``; expose it
# under that name as well so those call sites resolve.
get_dataset = __A
def __A () ->List[Any]:
    """Entry point: parse CLI args, build model/tokenizer/datasets, train and evaluate.

    Returns a dict of eval results (perplexity) when ``--do_eval`` is set.

    NOTE(review): like the rest of this file, intermediate results are
    assigned to the obfuscated name ``lowerCAmelCase__`` but later read under
    their intended names (``parser``, ``model_args``, ``data_args``,
    ``training_args``, ``config``, ``tokenizer``, ``model``, ``data_collator``,
    ``trainer``, ``results`` ...) — those reads raise NameError at runtime;
    the original names need to be restored.
    """
    lowerCAmelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.' )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        lowerCAmelCase__ :Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        lowerCAmelCase__ :List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        lowerCAmelCase__ :List[Any] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.tokenizer_name:
        lowerCAmelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        lowerCAmelCase__ :str = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name' )
    if model_args.model_name_or_path:
        lowerCAmelCase__ :Optional[Any] = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
    else:
        logger.info('Training new model from scratch' )
        lowerCAmelCase__ :int = AutoModelWithLMHead.from_config(_SCREAMING_SNAKE_CASE )
    model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        lowerCAmelCase__ :Dict = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        lowerCAmelCase__ :Any = min(data_args.block_size , tokenizer.max_len )
    # Get datasets
    lowerCAmelCase__ :List[str] = (
        get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    lowerCAmelCase__ :Optional[int] = (
        get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , evaluate=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    # Pick the data collator matching the model family / masking strategy.
    if config.model_type == "xlnet":
        lowerCAmelCase__ :str = DataCollatorForPermutationLanguageModeling(
            tokenizer=_SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            lowerCAmelCase__ :Optional[Any] = DataCollatorForWholeWordMask(
                tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
        else:
            lowerCAmelCase__ :str = DataCollatorForLanguageModeling(
                tokenizer=_SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    lowerCAmelCase__ :Tuple = Trainer(
        model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE , )
    # Training
    if training_args.do_train:
        lowerCAmelCase__ :Tuple = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=_SCREAMING_SNAKE_CASE )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    lowerCAmelCase__ :Optional[Any] = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        lowerCAmelCase__ :Any = trainer.evaluate()
        lowerCAmelCase__ :Optional[Any] = math.exp(eval_output['eval_loss'] )
        lowerCAmelCase__ :Dict = {'perplexity': perplexity}
        lowerCAmelCase__ :List[Any] = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
        if trainer.is_world_master():
            with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key in sorted(result.keys() ):
                    logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , str(result[key] ) )
                    writer.write('%s = %s\n' % (key, str(result[key] )) )
        results.update(_SCREAMING_SNAKE_CASE )
    return results
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]:
    """xla_spawn (TPU) entry wrapper.

    NOTE(review): calls ``main()``, but no function named ``main`` exists in
    this file — the entry point above is itself named ``__A`` and is shadowed
    by this very definition, so both this wrapper and the ``__main__`` guard
    below raise NameError when invoked. TODO: restore the original function
    names (``get_dataset`` / ``main`` / ``_mp_fn``).
    """
    main()


if __name__ == "__main__":
    main()
| 93 | 1 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Any = [0] * len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = []
lowerCAmelCase__ :Dict = []
lowerCAmelCase__ :Tuple = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if indegree[i] == 0:
queue.append(_SCREAMING_SNAKE_CASE )
while queue:
lowerCAmelCase__ :Tuple = queue.pop(0 )
cnt += 1
topo.append(_SCREAMING_SNAKE_CASE )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_SCREAMING_SNAKE_CASE )
if cnt != len(_SCREAMING_SNAKE_CASE ):
print('Cycle exists' )
else:
print(_SCREAMING_SNAKE_CASE )
# Adjacency List of Graph
# Fix: the sort function above is also named ``__A`` and the original code
# called the undefined names ``topological_sort`` and ``graph`` (NameError).
# Capture the function before the assignment below rebinds ``__A`` to the dict.
topological_sort = __A
__A = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(__A)
| 93 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( a ):
    """Output container for the temporal transformer model.

    Holds a single ``torch.FloatTensor`` field (presumably the model's output
    sample — the field name is obfuscated; confirm against the original).
    """
    __magic_name__ :torch.FloatTensor
class _lowerCAmelCase ( a , a ):
    """Transformer applied over the time (frame) axis of a video latent.

    Normalises the input, projects channels to the attention width, runs a
    stack of ``BasicTransformerBlock``s with frames as the sequence dimension,
    projects back, and adds the residual.

    NOTE(review): local/attribute names were mangled by renaming (every
    assignment targets ``lowerCAmelCase__``), so the bodies below no longer
    bind the names they later read; restore against the original source.
    """

    @register_to_config
    def __init__( self , __UpperCAmelCase = 1_6 , __UpperCAmelCase = 8_8 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = True , __UpperCAmelCase = True , ):
        '''Build the norm / in-projection / transformer blocks / out-projection stack.'''
        super().__init__()
        lowerCAmelCase__ :Dict = num_attention_heads
        lowerCAmelCase__ :Any = attention_head_dim
        # Inner attention width = heads * head_dim.
        lowerCAmelCase__ :Optional[int] = num_attention_heads * attention_head_dim
        lowerCAmelCase__ :Any = in_channels
        lowerCAmelCase__ :str = torch.nn.GroupNorm(num_groups=__UpperCAmelCase , num_channels=__UpperCAmelCase , eps=1E-6 , affine=__UpperCAmelCase )
        lowerCAmelCase__ :int = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
        # 3. Define transformers blocks
        lowerCAmelCase__ :List[Any] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dropout=__UpperCAmelCase , cross_attention_dim=__UpperCAmelCase , activation_fn=__UpperCAmelCase , attention_bias=__UpperCAmelCase , double_self_attention=__UpperCAmelCase , norm_elementwise_affine=__UpperCAmelCase , )
                for d in range(__UpperCAmelCase )
            ] )
        lowerCAmelCase__ :List[Any] = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase = True , ):
        '''Forward pass over the temporal axis.

        Shapes below follow the visible reshapes: the input is
        (batch*frames, channel, height, width); attention runs with frames as
        the sequence dimension after spatial dims are flattened into batch.
        '''
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = hidden_states.shape
        lowerCAmelCase__ :Tuple = batch_frames // num_frames
        # Keep the raw input for the residual connection at the end.
        lowerCAmelCase__ :str = hidden_states
        # (batch, frames, channel, h, w) -> (batch, channel, frames, h, w)
        lowerCAmelCase__ :Union[str, Any] = hidden_states[None, :].reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :str = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        lowerCAmelCase__ :Optional[int] = self.norm(__UpperCAmelCase )
        # Flatten spatial dims into the batch so attention mixes across frames.
        lowerCAmelCase__ :Optional[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = self.proj_in(__UpperCAmelCase )
        # 2. Blocks
        for block in self.transformer_blocks:
            lowerCAmelCase__ :Optional[int] = block(
                __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase , cross_attention_kwargs=__UpperCAmelCase , class_labels=__UpperCAmelCase , )
        # 3. Output
        lowerCAmelCase__ :Any = self.proj_out(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = (
            hidden_states[None, None, :]
            .reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        lowerCAmelCase__ :Optional[Any] = hidden_states.reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=__UpperCAmelCase )
| 93 | 1 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _lowerCAmelCase ( enum.Enum ):
    """How the text-generation pipeline reports each generated sequence."""

    # Member names reconstructed from the pipeline in this file, which reads
    # ``ReturnType.TENSORS`` / ``ReturnType.NEW_TEXT`` / ``ReturnType.FULL_TEXT``.
    # The renamed original assigned all three values to one attribute name,
    # which raises ``TypeError: Attempted to reuse key`` at import time.
    TENSORS = 0  # raw generated token ids
    NEW_TEXT = 1  # only the newly generated text
    FULL_TEXT = 2  # prompt + generated text


# The pipeline class below refers to this enum by its original name; keep an
# alias so those references resolve.
ReturnType = _lowerCAmelCase
@add_end_docstrings(a )
class _lowerCAmelCase ( a ):
    """Causal-LM text-generation pipeline.

    Tokenises a prompt, calls ``model.generate`` and decodes the result.

    NOTE(review): every method body here was mechanically renamed — all
    assignments target ``lowerCAmelCase__`` and all public methods share the
    name ``snake_case`` (later defs shadow earlier ones) — so the code as
    written no longer binds the names it reads; restore against the original
    source.  The class attribute below is the article prepended to XLNet /
    Transfo-XL prompts to give those models some context state; its text is
    runtime data and must stay byte-identical.
    """

    __magic_name__ :Optional[Any] = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''Validate the model type and install the default prompt prefix.'''
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            lowerCAmelCase__ :List[str] = None
            if self.model.config.prefix is not None:
                lowerCAmelCase__ :Optional[int] = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                lowerCAmelCase__ :Dict = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self._sanitize_parameters(prefix=__UpperCAmelCase , **self._forward_params )
                lowerCAmelCase__ :Dict = {**self._preprocess_params, **preprocess_params}
                lowerCAmelCase__ :List[str] = {**self._forward_params, **forward_params}

    def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ):
        '''Split user kwargs into preprocess / forward / postprocess parameter dicts.'''
        lowerCAmelCase__ :Dict = {}
        if prefix is not None:
            lowerCAmelCase__ :Any = prefix
        if prefix:
            lowerCAmelCase__ :Optional[int] = self.tokenizer(
                __UpperCAmelCase , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=self.framework )
            lowerCAmelCase__ :Optional[Any] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    ' [None, \'hole\']' )
            lowerCAmelCase__ :str = handle_long_generation
        preprocess_params.update(__UpperCAmelCase )
        lowerCAmelCase__ :List[str] = generate_kwargs
        lowerCAmelCase__ :Any = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
            lowerCAmelCase__ :Dict = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
            lowerCAmelCase__ :Tuple = ReturnType.TENSORS
        if return_type is not None:
            lowerCAmelCase__ :Optional[int] = return_type
        if clean_up_tokenization_spaces is not None:
            lowerCAmelCase__ :Any = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only single-token stop sequences are supported; warn otherwise.
            lowerCAmelCase__ :str = self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
            if len(__UpperCAmelCase ) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.' )
            lowerCAmelCase__ :str = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''Transfo-XL needs a space inserted before punctuation when tokenising.'''
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True} )
        return super()._parse_and_tokenize(*__UpperCAmelCase , **__UpperCAmelCase )

    def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ):
        '''Generate continuations for one prompt or a batch of prompts.'''
        return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase=None , **__UpperCAmelCase ):
        '''Tokenise ``prefix + prompt``; with ``handle_long_generation="hole"``, keep only the suffix that leaves room for generation.'''
        lowerCAmelCase__ :Optional[Any] = self.tokenizer(
            prefix + prompt_text , padding=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=self.framework )
        lowerCAmelCase__ :Tuple = prompt_text
        if handle_long_generation == "hole":
            lowerCAmelCase__ :Tuple = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                lowerCAmelCase__ :str = generate_kwargs['max_new_tokens']
            else:
                lowerCAmelCase__ :Union[str, Any] = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError('We cannot infer how many new tokens are expected' )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                lowerCAmelCase__ :Optional[int] = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
                        ' models max length' )
                lowerCAmelCase__ :Any = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    lowerCAmelCase__ :List[str] = inputs['attention_mask'][:, -keep_length:]
        return inputs

    def snake_case ( self , __UpperCAmelCase , **__UpperCAmelCase ):
        '''Run ``model.generate`` and reshape output to (batch, candidates, seq).'''
        lowerCAmelCase__ :Optional[int] = model_inputs['input_ids']
        lowerCAmelCase__ :List[str] = model_inputs.get('attention_mask' , __UpperCAmelCase )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            lowerCAmelCase__ :Dict = None
            lowerCAmelCase__ :str = None
            lowerCAmelCase__ :Dict = 1
        else:
            lowerCAmelCase__ :Union[str, Any] = input_ids.shape[0]
        lowerCAmelCase__ :List[Any] = model_inputs.pop('prompt_text' )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        lowerCAmelCase__ :Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
        if prefix_length > 0:
            lowerCAmelCase__ :Tuple = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                lowerCAmelCase__ :str = generate_kwargs.get('max_length' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            lowerCAmelCase__ :Optional[int] = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        lowerCAmelCase__ :Union[str, Any] = self.model.generate(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = generated_sequence.shape[0]
        if self.framework == "pt":
            lowerCAmelCase__ :str = generated_sequence.reshape(__UpperCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            lowerCAmelCase__ :List[Any] = tf.reshape(__UpperCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=ReturnType.FULL_TEXT , __UpperCAmelCase=True ):
        '''Decode generated token ids into the requested return format.'''
        lowerCAmelCase__ :Union[str, Any] = model_outputs['generated_sequence'][0]
        lowerCAmelCase__ :List[Any] = model_outputs['input_ids']
        lowerCAmelCase__ :Optional[int] = model_outputs['prompt_text']
        lowerCAmelCase__ :Optional[Any] = generated_sequence.numpy().tolist()
        lowerCAmelCase__ :str = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                lowerCAmelCase__ :Optional[Any] = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                lowerCAmelCase__ :int = self.tokenizer.decode(
                    __UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    lowerCAmelCase__ :List[Any] = 0
                else:
                    lowerCAmelCase__ :Dict = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , ) )
                if return_type == ReturnType.FULL_TEXT:
                    lowerCAmelCase__ :Dict = prompt_text + text[prompt_length:]
                else:
                    lowerCAmelCase__ :Optional[int] = text[prompt_length:]
                lowerCAmelCase__ :List[str] = {'generated_text': all_text}
            records.append(__UpperCAmelCase )
        return records
| 93 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
    """Settings holder for the DETA image-processor tests below.

    Stores the processor configuration and computes the (height, width) the
    processor is expected to produce for given inputs.

    NOTE(review): assignments were renamed to ``lowerCAmelCase__`` and both
    public methods share the name ``snake_case``, so the bodies no longer bind
    the attributes/locals they read; restore against the original source.
    """

    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 2_5_5 , __UpperCAmelCase=True , ):
        '''Store the configuration used to build processor instances.'''
        lowerCAmelCase__ :Any = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        lowerCAmelCase__ :List[Any] = parent
        lowerCAmelCase__ :int = batch_size
        lowerCAmelCase__ :Union[str, Any] = num_channels
        lowerCAmelCase__ :Any = min_resolution
        lowerCAmelCase__ :Dict = max_resolution
        lowerCAmelCase__ :Dict = do_resize
        lowerCAmelCase__ :Optional[Any] = size
        lowerCAmelCase__ :List[str] = do_normalize
        lowerCAmelCase__ :str = image_mean
        lowerCAmelCase__ :Tuple = image_std
        lowerCAmelCase__ :Dict = do_rescale
        lowerCAmelCase__ :Tuple = rescale_factor
        lowerCAmelCase__ :Optional[int] = do_pad

    def snake_case ( self ):
        '''Return the settings as the kwargs dict the processor expects.'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
        '''Compute the (height, width) the shortest-edge resize should yield;
        for batched inputs, the per-image maxima (padding target).'''
        if not batched:
            lowerCAmelCase__ :str = image_inputs[0]
            if isinstance(__UpperCAmelCase , Image.Image ):
                lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = image.size
            else:
                lowerCAmelCase__ , lowerCAmelCase__ :str = image.shape[1], image.shape[2]
            if w < h:
                lowerCAmelCase__ :int = int(self.size['shortest_edge'] * h / w )
                lowerCAmelCase__ :List[str] = self.size['shortest_edge']
            elif w > h:
                lowerCAmelCase__ :Union[str, Any] = self.size['shortest_edge']
                lowerCAmelCase__ :Any = int(self.size['shortest_edge'] * w / h )
            else:
                lowerCAmelCase__ :int = self.size['shortest_edge']
                lowerCAmelCase__ :Union[str, Any] = self.size['shortest_edge']
        else:
            lowerCAmelCase__ :Optional[Any] = []
            for image in image_inputs:
                lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowerCAmelCase__ :List[str] = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0]
            lowerCAmelCase__ :List[Any] = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( a , unittest.TestCase ):
    """Tests for ``DetaImageProcessor``: attribute presence, PIL/numpy/torch
    inputs, and slow COCO detection / panoptic annotation round-trips.

    NOTE(review): assignments were renamed to ``lowerCAmelCase__`` and all
    test methods share the name ``snake_case`` (later defs shadow earlier
    ones); ``DetaImageProcessingTester`` below refers to the (renamed) helper
    class above.  Restore names against the original source.
    """

    # Processor class under test; None disables the mixin when vision
    # dependencies are unavailable.
    __magic_name__ :Any = DetaImageProcessor if is_vision_available() else None

    def snake_case ( self ):
        '''Create the shared settings helper.'''
        lowerCAmelCase__ :int = DetaImageProcessingTester(self )

    @property
    def snake_case ( self ):
        '''Processor kwargs derived from the helper.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case ( self ):
        '''The processor exposes all expected configuration attributes.'''
        lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_rescale' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_pad' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )

    def snake_case ( self ):
        '''``from_dict`` applies the default size and padding settings.'''
        lowerCAmelCase__ :Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , __UpperCAmelCase )

    def snake_case ( self ):
        '''Intentionally empty (placeholder in the original test suite).'''
        pass

    def snake_case ( self ):
        '''Processor handles PIL images, single and batched.'''
        lowerCAmelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , Image.Image )
        # Test not batched input
        lowerCAmelCase__ :Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case ( self ):
        '''Processor handles numpy arrays, single and batched.'''
        lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase__ :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , np.ndarray )
        # Test not batched input
        lowerCAmelCase__ :List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ :Tuple = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case ( self ):
        '''Processor handles torch tensors, single and batched.'''
        lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
        # Test not batched input
        lowerCAmelCase__ :Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ :str = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def snake_case ( self ):
        '''End-to-end check on a real COCO detection annotation fixture.'''
        lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            lowerCAmelCase__ :Dict = json.loads(f.read() )
        lowerCAmelCase__ :int = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        lowerCAmelCase__ :int = DetaImageProcessor()
        lowerCAmelCase__ :List[Any] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='pt' )
        # verify pixel values
        lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
        # verify area
        lowerCAmelCase__ :Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
        # verify boxes
        lowerCAmelCase__ :Tuple = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
        # verify image_id
        lowerCAmelCase__ :Any = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
        # verify is_crowd
        lowerCAmelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
        # verify class_labels
        lowerCAmelCase__ :Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
        # verify orig_size
        lowerCAmelCase__ :str = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
        # verify size
        lowerCAmelCase__ :Any = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )

    @slow
    def snake_case ( self ):
        '''End-to-end check on a real COCO panoptic annotation fixture.'''
        lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            lowerCAmelCase__ :Dict = json.loads(f.read() )
        lowerCAmelCase__ :Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        lowerCAmelCase__ :Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        lowerCAmelCase__ :Dict = DetaImageProcessor(format='coco_panoptic' )
        lowerCAmelCase__ :Optional[int] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='pt' )
        # verify pixel values
        lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
        # verify area
        lowerCAmelCase__ :Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
        # verify boxes
        lowerCAmelCase__ :int = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
        # verify image_id
        lowerCAmelCase__ :Optional[int] = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
        # verify is_crowd
        lowerCAmelCase__ :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
        # verify class_labels
        lowerCAmelCase__ :List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
        # verify masks
        lowerCAmelCase__ :Optional[int] = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCAmelCase )
        # verify orig_size
        lowerCAmelCase__ :Optional[int] = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
        # verify size
        lowerCAmelCase__ :Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
| 93 | 1 |
"""simple docstring"""
import math
import sys
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Read a file as raw bytes and return its contents as a bit string.

    Each byte becomes eight '0'/'1' characters, most significant bit first.
    On any OS error a message is printed and the process exits, preserving
    the original script's behaviour.

    Fixes vs. the renamed original: locals were all bound to one throwaway
    name while later reads used the (now undefined) original names; the
    quadratic ``+=`` accumulation is replaced with ``str.join``.
    """
    try:
        with open(_SCREAMING_SNAKE_CASE , 'rb' ) as binary_file:
            data = binary_file.read()
        return ''.join(F"{dat:08b}" for dat in data )
    except OSError:
        print('File not accessible' )
        sys.exit()
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Decompress an LZW bit string and return the decoded bit string.

    The lexicon starts with the two single-bit codes and grows as codes are
    emitted; whenever the number of assigned codes reaches a power of two the
    existing keys are widened with a leading '0' so code length grows in
    lock-step with the paired compressor.

    Fixes vs. the renamed original: every assignment targeted one throwaway
    name, leaving ``lexicon``/``curr_string``/``index`` etc. unbound; the
    bindings are restored here (``index`` starts at ``len(lexicon)``,
    matching the code-widening logic — the renamed line read the parameter
    instead).
    """
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon )
    for i in range(len(_SCREAMING_SNAKE_CASE ) ):
        curr_string += _SCREAMING_SNAKE_CASE[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        # When the code count hits a power of two, widen all keys by one bit.
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def __A (file_path , to_write ) ->None:
    """Split *to_write* into 8-bit chunks, pad the last chunk, and write the
    chunks (except the final padding-marker chunk) as bytes to *file_path*.

    On any OS error a message is printed and the process exits.

    Fixes vs. the renamed original: both parameters shared one name (a
    SyntaxError in Python) and locals were bound to a throwaway name; the
    original parameter names are restored.
    """
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            # Pad with a '1' followed by zeros; if the data already fills a
            # whole byte, append a full marker byte instead.
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            # NOTE(review): the final (marker) chunk is deliberately not
            # written — presumably the paired reader re-adds it; confirm
            # against the companion compressor.
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Strip the length-prefix from a decompressed bit string.

    Counts the leading zeros up to the first '1', then removes that prefix
    and the same number of following bits plus one, exactly as the renamed
    original's double slice did.

    Fixes vs. the renamed original: ``counter`` and ``data_bits`` were bound
    to a throwaway name and therefore undefined where read.
    """
    counter = 0
    for letter in _SCREAMING_SNAKE_CASE:
        if letter == "1":
            break
        counter += 1
    data_bits = _SCREAMING_SNAKE_CASE[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def __A (source_path , destination_path ) ->None:
    """Decompress an LZW-compressed file: read its bits, strip the prefix,
    decode, and write the result.

    Fixes vs. the renamed original: both parameters shared one name (a
    SyntaxError) and the ``__main__`` guard called undefined ``compress``.

    NOTE(review): the helpers referenced below were themselves renamed to
    ``__A`` elsewhere in this file, so these calls only resolve once those
    helper names are restored as well.
    """
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )


if __name__ == "__main__":
    # The original guard invoked the undefined name ``compress``; call this
    # module's entry point instead.
    __A(sys.argv[1], sys.argv[2])
| 93 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Return the binary digits of a non-negative integer, recursively.

    :param _SCREAMING_SNAKE_CASE: anything ``int()`` accepts (int or string).
    :return: the binary representation without any '0b' prefix.

    Fixes vs. the renamed original: the recursive call targeted the
    now-undefined name ``binary_recursive`` and the locals were never bound.
    """
    decimal = int(_SCREAMING_SNAKE_CASE )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    return __A(div ) + str(mod )
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Convert an integer (given as int or string) to a '0b'-prefixed binary string.

    Accepts surrounding whitespace and an optional leading '-'.

    :raises ValueError: if the input is empty or not an integer.

    Fixes vs. the renamed original: the result expression called the
    now-undefined ``binary_recursive``; the builtin ``bin`` produces the same
    '0b...' form, so it is used instead.
    """
    number = str(_SCREAMING_SNAKE_CASE ).strip()
    if not number:
        raise ValueError('No input value was provided' )
    negative = '-' if number.startswith('-' ) else ''
    number = number.lstrip('-' )
    if not number.isnumeric():
        raise ValueError('Input value is not an integer' )
    return F"{negative}{bin(int(number ) )}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 93 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
__A = logging.get_logger(__name__)
# NOTE(review): this rebinds ``__A`` and clobbers the logger above — before
# renaming these were two distinct names (logger / pretrained config map).
__A = {
    """google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class _lowerCAmelCase ( a ):
    """Configuration for Switch Transformers (sparse mixture-of-experts T5 variant).

    NOTE(review): the three class attributes below were distinct before
    renaming (presumably ``model_type``, ``keys_to_ignore_at_inference``,
    ``attribute_map``); as written each assignment shadows the previous one.
    The ``__init__`` body likewise binds every value to one throwaway name —
    restore against the original source.
    """

    __magic_name__ :Optional[int] = """switch_transformers"""
    __magic_name__ :Optional[Any] = ["""past_key_values"""]
    __magic_name__ :str = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__( self , __UpperCAmelCase=3_2_1_2_8 , __UpperCAmelCase=7_6_8 , __UpperCAmelCase=6_4 , __UpperCAmelCase=2_0_4_8 , __UpperCAmelCase=6_4 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_2 , __UpperCAmelCase=8 , __UpperCAmelCase=False , __UpperCAmelCase=0.01 , __UpperCAmelCase="float32" , __UpperCAmelCase=False , __UpperCAmelCase=3_2 , __UpperCAmelCase=1_2_8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1E-6 , __UpperCAmelCase=0.0_01 , __UpperCAmelCase=0.0_01 , __UpperCAmelCase=1.0 , __UpperCAmelCase="relu" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , **__UpperCAmelCase , ):
        '''Store hyper-parameters; derive sparse-layer frequency and activation info.'''
        lowerCAmelCase__ :Union[str, Any] = vocab_size
        lowerCAmelCase__ :int = d_model
        lowerCAmelCase__ :Union[str, Any] = d_kv
        lowerCAmelCase__ :Dict = d_ff
        lowerCAmelCase__ :List[str] = num_sparse_encoder_layers
        lowerCAmelCase__ :Union[str, Any] = num_layers
        lowerCAmelCase__ :int = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        lowerCAmelCase__ :List[Any] = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            lowerCAmelCase__ :Optional[Any] = self.num_layers // self.num_sparse_encoder_layers
        else:
            lowerCAmelCase__ :Optional[int] = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            lowerCAmelCase__ :List[str] = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            lowerCAmelCase__ :Optional[int] = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        lowerCAmelCase__ :List[str] = num_heads
        lowerCAmelCase__ :Dict = num_experts
        lowerCAmelCase__ :Optional[Any] = expert_capacity
        lowerCAmelCase__ :Union[str, Any] = router_bias
        lowerCAmelCase__ :Optional[Any] = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        lowerCAmelCase__ :str = router_dtype
        lowerCAmelCase__ :int = router_ignore_padding_tokens
        lowerCAmelCase__ :Dict = relative_attention_num_buckets
        lowerCAmelCase__ :Dict = relative_attention_max_distance
        lowerCAmelCase__ :str = dropout_rate
        lowerCAmelCase__ :List[Any] = layer_norm_epsilon
        lowerCAmelCase__ :List[str] = initializer_factor
        lowerCAmelCase__ :List[Any] = feed_forward_proj
        lowerCAmelCase__ :str = use_cache
        lowerCAmelCase__ :Any = add_router_probs
        lowerCAmelCase__ :List[Any] = router_z_loss_coef
        lowerCAmelCase__ :List[str] = router_aux_loss_coef
        # Parse the activation spec, e.g. "gated-gelu" -> gated + gelu.
        lowerCAmelCase__ :Union[str, Any] = self.feed_forward_proj.split('-' )
        lowerCAmelCase__ :Dict = act_info[-1]
        lowerCAmelCase__ :str = act_info[0] == 'gated'
        if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2:
            raise ValueError(
                F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            lowerCAmelCase__ :Tuple = 'gelu_new'
        super().__init__(
            pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , )
| 93 |
"""Lazy-import module for the MGP-STR model family."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

# The modeling submodule needs torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``InstructBlipProcessor`` (image processor + tokenizer + Q-Former tokenizer)."""

    def setUp(self):
        # Build a processor from tiny hub components and save it to a temp dir
        # that the other tests reload from.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image (channels-first array moved to channels-last)."""
        image_inputs = [np.random.randint(2_5_5, size=(3, 3_0, 4_0_0), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        # Extra kwargs passed at load time must override the saved components.
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        # The processor must delegate image handling to its image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        # Text must be tokenized by both tokenizers; Q-Former outputs get a "qformer_" prefix.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        # batch_decode must be forwarded to the main tokenizer.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()),
            ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'],
        )
| 93 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A = logging.get_logger(__name__)
# Map of public DeBERTa-v2 checkpoint names to their hosted config.json files.
# NOTE(review): this dict rebinds ``__A`` (previously the logger), so the logger
# handle is lost — presumably these were two differently named globals. TODO confirm.
__A = {
    """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
    ),
    """microsoft/deberta-v2-xxlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
    ),
}
class _lowerCAmelCase ( a ):
    """Configuration holding the architecture hyperparameters of a DeBERTa-v2 model.

    All parameters default to the ``microsoft/deberta-v2-xlarge`` values.
    """

    # Model-type identifier (consumed by the auto classes via the base config).
    __magic_name__ :Union[str, Any] = """deberta-v2"""

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        """Store every hyperparameter on the instance; ``**kwargs`` go to the base config.

        ``pos_att_type`` may be a list (e.g. ``["p2c", "c2p"]``) or, for backwards
        compatibility, a ``"p2c|c2p"``-style string that is split here.
        """
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept a "|"-separated string of attention types.
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
        self.pos_att_type = pos_att_type

        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Pooler defaults to the hidden size unless overridden via kwargs.
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class _lowerCAmelCase ( a ):
    """ONNX export configuration for DeBERTa-v2 models."""

    @property
    def inputs(self):
        """Input names mapped to their dynamic axes (token_type_ids only when the model uses them)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self):
        """Minimum ONNX opset version required by this architecture."""
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
        tokenizer=None,
    ):
        """Build dummy inputs via the base class, dropping token_type_ids when unused by the model."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 93 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class _lowerCAmelCase ( a ):
    """Image processor applying resize -> center-crop -> rescale -> normalize."""

    # Names of the model inputs this processor produces.
    __magic_name__ :Optional[Any] = ["""pixel_values"""]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        """Store default preprocessing settings; each can be overridden per call in ``preprocess``."""
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize an image.

        If ``size`` has a ``"shortest_edge"`` key, the shorter side is scaled to
        ``int(256/224 * shortest_edge)`` while keeping the aspect ratio; otherwise
        ``size`` must provide explicit ``"height"``/``"width"``.
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
        return resize(
            image, size=(size_dict['height'], size_dict['width']), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop an image to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize an image with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one image or a batch; every ``None`` falls back to the instance default."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 93 |
"""Lazy-import module for the X-CLIP model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

# The modeling submodule needs torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
"""Compute the buoyant (Archimedes) force on an object submerged in a fluid."""

# Standard gravitational acceleration at Earth's surface, in m/s^2.
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force, ``fluid_density * gravity * volume``.

    Args:
        fluid_density: density of the fluid in kg/m^3 (must be > 0).
        volume: volume of the displaced fluid in m^3 (must be >= 0).
        gravity: gravitational acceleration in m/s^2 (must be > 0).

    Raises:
        ValueError: if density or gravity is non-positive, or volume is negative.

    >>> archimedes_principle(fluid_density=500, volume=4, gravity=9.8)
    19600.0
    """
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density' )
    if volume < 0:
        raise ValueError('Impossible Object volume' )
    if gravity <= 0:
        raise ValueError('Impossible Gravity' )
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
| 93 |
"""Lazy-import module for the mLUKE tokenizer."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Maps submodule name -> names it exports; filled in below when deps are present.
_import_structure = {}

# The tokenizer needs sentencepiece; register it only when that is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
# Map of public Wav2Vec2 checkpoint names to their hosted config.json files.
# NOTE(review): this dict rebinds ``__A`` (previously the logger), so the logger
# handle is lost — presumably these were two differently named globals. TODO confirm.
__A = {
    """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class _lowerCAmelCase ( a ):
    """Configuration holding the architecture hyperparameters of a Wav2Vec2 model.

    Defaults correspond to the ``facebook/wav2vec2-base-960h`` architecture.
    """

    # Model-type identifier (consumed by the auto classes via the base config).
    __magic_name__ :List[Any] = """wav2vec2"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        """Store every hyperparameter on the instance; special tokens go to the base config."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the conv feature extractor: input samples consumed per output frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 93 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny hub checkpoints used as teachers in the tests below.
TINY_BART = """sshleifer/bart-tiny-random"""
TINY_T5 = """patrickvonplaten/t5-tiny-random"""


@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``create_student_by_copying_alternating_layers``."""

    @cached_property
    def teacher_config(self):
        # Cached because it is fetched from the hub.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # Must not raise when only the encoder depth is specified.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        # Leaving d=None keeps the full teacher decoder depth.
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # At least one of e/d must be given.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 93 | 1 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """Remove fairseq bookkeeping keys that have no HF equivalent (mutates ``state_dict`` in place)."""
    ignore_keys = [
        'decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        # Default of None makes the pop a no-op when the key is absent.
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares the embedding's weight tensor (for tied lm_head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the storage; the layer's randomly initialized weight is discarded.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint and return an equivalent ``XGLMForCausalLM``."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    args = Namespace(**checkpoint['cfg']['model'])
    state_dict = checkpoint['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]

    # fairseq prefixes decoder weights with "decoder"; HF uses "model".
    state_dict = {key.replace('decoder', 'model'): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='gelu',
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the output projection is rebuilt below from the embeddings.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 93 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
__A = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """
    Export `model` to an ONNX graph at `output_path` via `torch.onnx.export`.

    Args:
        model: the torch module to export.
        model_args: example inputs, passed positionally to the model's forward.
        output_path: destination ``model.onnx`` path; parent dirs are created.
        ordered_input_names: input names, in forward-argument order.
        output_names: names assigned to the graph outputs.
        dynamic_axes: per-input/output axes allowed to vary at runtime.
        opset: ONNX opset version to target.
        use_external_data_format: store weights outside the protobuf (pre-1.11 only).
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """
    Export every sub-model of a Stable Diffusion pipeline (text encoder, UNet,
    VAE encoder/decoder, optional safety checker) to ONNX, then assemble and
    save an `OnnxStableDiffusionPipeline` under `output_path`.

    Args:
        model_path: `diffusers` checkpoint to convert (local directory or Hub id).
        output_path: directory the ONNX pipeline is written to.
        opset: ONNX operator-set version to export with.
        fp16: export weights in float16 (requires a CUDA device).

    Raises:
        ValueError: if `fp16` is requested but CUDA is not available.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        'A sample prompt',
        padding='max_length',
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors='pt',
    )
    onnx_export(
        pipeline.text_encoder,
        # input ids are cast to int32 before export
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / 'text_encoder' / 'model.onnx',
        ordered_input_names=['input_ids'],
        output_names=['last_hidden_state', 'pooler_output'],
        dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'],
        output_names=['out_sample'],
        dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        },
        opset=opset,
        use_external_data_format=True,
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location='weights.pb',
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / 'vae_encoder' / 'model.onnx',
        ordered_input_names=['sample', 'return_dict'],
        output_names=['latent_sample'],
        dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / 'vae_decoder' / 'model.onnx',
        ordered_input_names=['latent_sample', 'return_dict'],
        output_names=['sample'],
        dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / 'safety_checker' / 'model.onnx',
            ordered_input_names=['clip_input', 'images'],
            output_names=['out_images', 'has_nsfw_concepts'],
            dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker')
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder'),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder'),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder'),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet'),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print('ONNX pipeline saved to', output_path)

    del pipeline
    del onnx_pipeline
    # smoke test: reload the exported pipeline to make sure it is self-consistent
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider='CPUExecutionProvider')
    print('ONNX pipeline is loadable')
if __name__ == "__main__":
    # CLI entry point: convert a diffusers Stable Diffusion checkpoint to ONNX.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_path""",
        type=str,
        required=True,
        help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
    )
    parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
    parser.add_argument(
        """--opset""",
        default=14,
        type=int,
        help="""The version of the ONNX operator set to use.""",
    )
    parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    # argparse stores `--fp16` as `args.fp16`
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 93 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class _lowerCAmelCase ( a ):
    """
    Tokenizer wrapper that lets one placeholder token stand for several learned
    embedding vectors (multi-vector textual inversion).

    `token_map` maps each registered placeholder token to the list of concrete
    vocabulary tokens it is expanded into before tokenization.
    NOTE(review): base class `a` is presumably CLIPTokenizer (imported above) — confirm.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder token -> list of tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add `placeholder_token` to the vocabulary; raise if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                F"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                ' `placeholder_token` that is not already in the tokenizer.' )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register `placeholder_token`, expanding it into `num_vec_per_token` new tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + F"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F"The tokenizer already has placeholder token {token} that can get confused with"
                    F" {placeholder_token}keep placeholder tokens independent" )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """
        Expand every registered placeholder token occurring in `text`.

        Accepts a string or a list of strings; `prop_tokens_to_load` keeps only
        a leading fraction of each expansion, `vector_shuffle` shuffles a copy.
        """
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                # NOTE(review): prop_tokens_to_load is not forwarded for list inputs — confirm intent
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ' '.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Tokenize `text` after expanding placeholder tokens."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Encode `text` after expanding placeholder tokens."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 93 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Submodule name -> public names; consumed lazily by `_LazyModule` below.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Repo root (three levels up from this test file); lets us import the repo's utils.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_copies  # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
# Referenced by the tests below as REFERENCE_CODE (e.g. in test_find_code_in_transformers).
REFERENCE_CODE = """ def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias
    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for the `check_copies` repo utility (copy-consistency and README sync)."""

    def setUp(self):
        # Work in a throwaway copy of the repo layout so the real tree is untouched.
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, 'models/bert/'))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, 'src/transformers/models/bert/modeling_bert.py'), os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'), )

    def tearDown(self):
        # Restore the real path and remove the scratch directory.
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Format `class_code` as a class body and check `is_copy_consistent` on it.

        When `overwrite_result` is given, run with overwrite=True and compare the
        rewritten file against the expected result.
        """
        code = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=1_1_9)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, 'new_code.py')
        with open(fname, 'w', newline='\n') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, 'r') as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE + '\n', )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", F"{long_class_name}LMPredictionHead", re.sub('Bert', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', REFERENCE_CODE, overwrite_result=re.sub('Bert', 'TestModel', REFERENCE_CODE), )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
            ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
            ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
            ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
            ' Luong, Quoc V. Le, Christopher D. Manning.'
        )
        localized_md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        converted_localized_md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
            ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
            ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
            ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
            ' Christopher D. Manning 发布。\n'
        )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme['format_model_list'])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
        )
        localized_md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
            ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        converted_localized_md_list = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme['format_model_list'])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_localized_md_list)
| 93 |
"""simple docstring"""
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all n-grams (contiguous substrings of length `ngram_size`) of `sentence`.

    Returns an empty list when `ngram_size` is larger than the sentence.

    >>> create_ngram("abc", 2)
    ['ab', 'bc']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 93 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Model id -> hosted config URL for pretrained WavLM checkpoints.
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( a ):
    """
    Configuration for WavLM models: feature-extractor convolutions, transformer
    encoder sizes, SpecAugment masking, quantizer/pretraining, CTC, adapter and
    XVector heads.
    NOTE(review): base class `a` is presumably `PretrainedConfig` — confirm.
    """

    # model identifier used by the auto classes
    model_type = """wavlm"""

    def __init__(
        self,
        vocab_size=3_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(1_0, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=1_2_8,
        num_conv_pos_embedding_groups=1_6,
        num_buckets=3_2_0,
        max_bucket_distance=8_0_0,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=1_0,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=1_0,
        num_codevectors_per_group=3_2_0,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=1_0_0,
        codevector_dim=2_5_6,
        proj_codevector_dim=2_5_6,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_5_6,
        tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=5_1_2,
        num_ctc_classes=8_0,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total stride of the feature extractor: product of the conv strides.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 93 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = tempfile.mkdtemp()
lowerCAmelCase__ :List[Any] = BlipImageProcessor()
lowerCAmelCase__ :Union[str, Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
lowerCAmelCase__ :Union[str, Any] = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
lowerCAmelCase__ :List[str] = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def snake_case ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCAmelCase__ :Dict = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase__ :str = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
lowerCAmelCase__ :Dict = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
lowerCAmelCase__ :List[Any] = self.get_tokenizer()
lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
lowerCAmelCase__ :str = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :Dict = self.prepare_image_inputs()
lowerCAmelCase__ :List[str] = image_processor(__UpperCAmelCase , return_tensors='np' )
lowerCAmelCase__ :Optional[Any] = processor(images=__UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.get_image_processor()
lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
lowerCAmelCase__ :Any = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = 'lower newer'
lowerCAmelCase__ :Dict = processor(text=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
lowerCAmelCase__ :str = self.get_qformer_tokenizer()
lowerCAmelCase__ :Dict = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = 'lower newer'
lowerCAmelCase__ :Dict = self.prepare_image_inputs()
lowerCAmelCase__ :Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.get_image_processor()
lowerCAmelCase__ :Dict = self.get_tokenizer()
lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
lowerCAmelCase__ :int = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ :Tuple = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.get_image_processor()
lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
lowerCAmelCase__ :Dict = self.get_qformer_tokenizer()
lowerCAmelCase__ :Optional[Any] = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = 'lower newer'
lowerCAmelCase__ :Optional[int] = self.prepare_image_inputs()
lowerCAmelCase__ :int = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 93 | 1 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :list[list[int]] = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCAmelCase__ :Union[str, Any] = 1
for n in range(m + 1 ):
for k in range(1 , _SCREAMING_SNAKE_CASE ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__A = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
__A = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 93 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison-operator lookup table used by the version checks below: maps each
# pip-style comparison token to its ``operator`` function.
# BUG FIX: this was assigned to a mangled name while the code below reads ``ops``.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare ``got_ver`` against ``want_ver`` using comparison token ``op``.

    BUG FIX: this helper was mangled to ``__A`` while its callers below invoke
    ``_compare_versions``; parameter names are restored from the body's reads.

    :raises ValueError: if either version is None (cannot be compared)
    :raises ImportError: if the installed version does not satisfy the requirement
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            F" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement, hint=None) -> None:
    """Perform a runtime check of a dependency version using pip-style syntax
    (e.g. ``"numpy"``, ``"tokenizers==0.9.4"``, ``"torch>=1.6,<2.0"``).

    BUG FIX: mangled to ``__A`` (shadowed twice); local assignments targeted a
    mangled name while later lines read ``pkg``/``match``/``wanted`` etc. — names
    restored from those reads.

    :param requirement: pip-style requirement string
    :param hint: optional suggestion appended to failure messages
    """
    hint = F"\n{hint}" if hint is not None else ''
    wanted = {}  # op -> wanted version; initialized up front so the bare-"python" path is safe
    # non-versioned check
    if re.match(r'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                F" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        for w in want_range:
            match = re.findall(r'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    F" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}")

    # special case: the interpreter itself
    if pkg == "python":
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def __A(requirement) -> None:
    """``require_version`` wrapper that appends a transformers-specific reinstall
    hint to any failure message (upstream name: ``require_version_core``)."""
    hint = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    # BUG FIX: the original passed the requirement twice, so the hint computed
    # above was never used.
    return require_version(requirement, hint)
| 93 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A directed edge of the 0/1-weighted graph below.

    BUG FIX: the class was mangled to ``_lowerCAmelCase`` (immediately shadowed by
    the graph class) while ``add_edge`` below constructs ``Edge(...)``; the two
    fields were both mangled to the same name while the BFS reads
    ``edge.destination_vertex`` and ``edge.weight``. Field order matches the
    positional ``Edge(to_vertex, weight)`` construction.
    """

    destination_vertex: int  # index of the vertex this edge points to
    weight: int  # must be 0 or 1 (validated by add_edge)
class AdjacencyList:
    """Directed graph with 0/1 edge weights, supporting 0-1 BFS shortest paths.

    BUG FIX: instance attributes were assigned to a mangled local name while the
    methods read ``self._graph``/``self._size``; the size property and the
    parameter names are restored from those reads. Method names (mangled to a
    single duplicated ``snake_case``) are reconstructed — the property must be
    ``size`` because ``add_edge`` reads ``self.size``.
    """

    def __init__(self, size):
        # One bucket of outgoing edges per vertex.
        self._graph = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        """Iterate over the outgoing edges of ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        # Number of vertices in the graph.
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        """Append a directed edge ``from_vertex -> to_vertex`` with weight 0 or 1."""
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        """0-1 BFS: 0-weight edges go to the deque's front, 1-weight to the back,
        so vertices are settled in non-decreasing distance order.

        :raises ValueError: when ``finish_vertex`` is unreachable
        """
        queue = deque([start_vertex])
        distances = [None] * self.size  # None == not yet reached
        distances[start_vertex] = 0  # BUG FIX: this seed assignment was mangled away

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip when the destination already has an equal-or-better distance.
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]


# Backward-compatible alias for the original (mangled) class name.
_lowerCAmelCase = AdjacencyList
if __name__ == "__main__":
import doctest
doctest.testmod()
| 93 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests of the ONNX Stable Diffusion img2img pipeline on a tiny checkpoint.

    NOTE(review): the base class was the undefined placeholder ``a`` — restored to
    the imported ``OnnxPipelineTesterMixin``. Local names were mangled (assignments
    went to one name while later lines read ``pipe``/``inputs``/``image`` etc.) and
    every test method shared the name ``snake_case`` (so only one survived);
    distinct names are reconstructed — TODO confirm against upstream.
    """

    # Read by each test via ``self.hub_checkpoint`` (the original class attribute
    # name was mangled to ``__magic_name__``).
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic pipeline kwargs (input image, numpy RNG, prompt) for ``seed``."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        return {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)  # mangled arg; None is the usual pattern — TODO confirm

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1E-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        # mangled assignment target/flag value; skip_prk_steps=True presumed — TODO confirm
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for the ONNX Stable Diffusion img2img pipeline.

    NOTE(review): local names reconstructed from the mangled write/read mismatch;
    several argument values were mangled placeholders and are restored to the
    conventional HF values (``safety_checker=None`` etc.) — TODO confirm.
    """

    @property
    def gpu_provider(self):
        # Read by the tests via ``self.gpu_provider`` (provider + provider options).
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # mangled assignment target; disabling mem-pattern is the usual intent — TODO confirm
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg'
        )
        init_image = init_image.resize((768, 512))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A fantasy landscape, trending on artstation'

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    def test_inference_k_lms(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg'
        )
        init_image = init_image.resize((768, 512))

        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx'
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A fantasy landscape, trending on artstation'

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type='np',
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 93 | 1 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Build a ``(getitem, key)`` operation tuple for the replay test below.

    BUG FIX: mangled to ``__A`` (shadowed by the next three defs) with a mangled
    parameter while the body returned the undefined name ``k``; the fixture lists
    below call ``_get``.
    """
    return getitem, k
def _set(k, v):
    """Build a ``(setitem, key, value)`` operation tuple (name restored from the
    ``_set(...)`` call sites in the fixtures below; parameters from the return)."""
    return setitem, k, v
def _del(k):
    """Build a ``(delitem, key)`` operation tuple (name restored from the
    ``_del(...)`` call sites in the fixtures below; parameter from the return)."""
    return delitem, k
def _run_operation(obj, fun, *args):
    """Apply ``fun(obj, *args)``; return ``(result, None)`` on success or
    ``(None, exception)`` on failure, so dict and HashMap behaviour can be diffed.

    BUG FIX: mangled to ``__A`` while the test below calls ``_run_operation``;
    parameters restored from the body's reads.
    """
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
# Operation sequences replayed against both HashMap and dict below.
# BUG FIX: all six fixtures were assigned to the same mangled name ``__A``
# (each shadowing the previous) while ``pytest.param`` below reads the
# distinct names restored here.
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    'operations',
    (
        pytest.param(_add_items, id='add items'),
        pytest.param(_overwrite_items, id='overwrite items'),
        pytest.param(_delete_items, id='delete items'),
        pytest.param(_access_absent_items, id='access absent items'),
        pytest.param(_add_with_resize_up, id='add with resize up'),
        pytest.param(_add_with_resize_down, id='add with resize down'),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    """Replay ``operations`` against a HashMap and a plain dict and check they
    stay observably identical after every step.

    BUG FIX: the function was mangled to ``__A`` (not pytest-collectable) and its
    locals were mangled while later lines read ``my``/``py``/``my_res``/``py_res``.
    """
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    """HashMap must not expose public names beyond (a strict subset of) dict's API.

    BUG FIX: mangled to ``__A`` (shadowing the previous test, and not
    pytest-collectable); ``is_public``'s parameter was mangled while its body
    reads ``name``.
    """
    def is_public(name) -> bool:
        return not name.startswith('_')

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 93 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__A = ["""gpt2"""]
__A = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :List[str] = tokenizer
lowerCAmelCase__ :Optional[int] = AutoConfig.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = TFGPTaLMHeadModel.from_config(__UpperCAmelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = self.tokenizer(__UpperCAmelCase )
lowerCAmelCase__ :int = tokenized['input_ids'].to_tensor()
lowerCAmelCase__ :Optional[Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowerCAmelCase__ :int = self.model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    """Checks the TF in-graph GPT-2 tokenizer against the reference Python tokenizer.

    NOTE(review): assignments were mangled while later lines read
    ``self.tokenizers``/``self.test_sentences`` etc.; every method shared the
    mangled name ``snake_case`` (so only the last survived) — distinct names
    reconstructed (``setUp`` is grounded by the ``super().setUp()`` call).
    """

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf')
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    # mangled dtype attribute; int64 cast presumed — TODO confirm
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            # mangled assignment target; pad_token_id is what padding needs — TODO confirm
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out['input_ids'].numpy().shape[1]

                assert out_length == max_length
| 93 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast tests for StableUnCLIPPipeline built from tiny dummy components.

    NOTE(review): the bases were three copies of the undefined placeholder ``a`` —
    restored to the three mixins imported above. Class attributes were all mangled
    to ``__magic_name__``; the restored names follow the mixin contracts
    (``pipeline_class``/``params``/...) — TODO confirm. Locals were mangled while
    the components dict below reads the real names, which grounds them.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False

    def get_dummy_components(self):
        """Build every (tiny) component StableUnCLIPPipeline needs."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1E-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        # clip_sample=True presumed (mangled placeholder) — TODO confirm
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log',
            prediction_type='sample',
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule='squaredcos_cap_v2',
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1E-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        # upcast_attention/use_linear_projection=True presumed (mangled) — TODO confirm
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type='projection',
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        # set_alpha_to_one=False presumed (mangled) — TODO confirm
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear',
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type='v_prediction',
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; mps needs the global torch seed."""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }

    def test_attention_slicing_forward_pass(self):
        # Only compare exact outputs on CPU, where results are deterministic.
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for StableUnCLIPPipeline.

    NOTE(review): locals reconstructed from the mangled write/read mismatch;
    ``torch.floataa`` (a nonexistent attribute) restored to ``torch.float16``
    to match the fp16 reference artifact — TODO confirm.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy'
        )

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turle', generator=generator, output_type='np')

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle',
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type='np',
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 93 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Map a checkpoint name like 'upernet-convnext-tiny' to an UperNetConfig.

    The ConvNeXt backbone depths/hidden sizes and the auxiliary-head input
    channels depend on the model size; the ADE20K label maps are fetched from
    the Hub.

    Fixes over the original block: the function was defined as ``__A`` but
    invoked as ``get_upernet_config``; the computed locals were never used
    (the raw ``model_name`` string was passed for depths/hidden sizes), and
    ``idalabel`` was read without ever being bound.

    Args:
        model_name: one of the 'upernet-convnext-*' names.

    Returns:
        A fully populated UperNetConfig.
    """
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    # NOTE: checked after "large" because 'upernet-convnext-xlarge' also
    # contains the substring 'large' — order matters here.
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information (ADE20K has 150 semantic classes)
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Return (old_key, new_key) pairs translating mmsegmentation checkpoint
    names to the Hugging Face UperNet/ConvNeXt layout.

    Fixes over the original block: the function was defined as ``__A`` but
    invoked as ``create_rename_keys``, and the accumulator list was bound to a
    placeholder so every ``rename_keys.append`` raised NameError.

    Args:
        config: an UperNetConfig whose ``backbone_config.depths`` drives the
            per-stage/per-layer key generation.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight'))
    rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias'))
    rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight'))
    rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias'))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        # stage 0 has no downsampling layer of its own (the stem covers it)
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
            ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
            ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
            ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
        ])
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place; the old key is removed.

    Fixes over the original block: the three parameters shared one name
    (a SyntaxError), and the popped value was never stored under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet-ConvNeXt checkpoint to the HF format.

    Downloads the original state dict, renames its keys to the Hugging Face
    layout, loads it into ``UperNetForSemanticSegmentation``, verifies a 3x3
    logits slice against known values on a fixture image, then optionally
    saves to disk and/or pushes to the Hub.

    Fixes over the original block: the function was defined as ``__A`` but
    invoked as ``convert_upernet_checkpoint``; the three parameters shared one
    name (a SyntaxError); every local was bound to a placeholder and then read
    under its intended name; an unknown ``model_name`` silently reached the
    assert with ``expected_slice`` undefined.

    Args:
        model_name: one of the 'upernet-convnext-*' names in the map below.
        pytorch_dump_folder_path: output directory, or None to skip saving.
        push_to_hub: if truthy, push model and processor to 'openmmlab/...'.
    """
    model_name_to_url = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
    # rename keys to the HF layout
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on a fixture image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    else:
        # Guard: never compare against an undefined slice.
        raise ValueError(f"Unknown model name: {model_name}")
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
# CLI entry point: parse the checkpoint name / output dir / hub flag and convert.
# NOTE(review): both the parser and the parsed namespace are bound to `__A`
# here while later lines read `parser` and `args` — restore the original
# names (`parser = ...`, `args = parser.parse_args()`) before running.
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 93 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( a , unittest.TestCase ):
"""Tokenizer tests for DeBERTa (slow ``DebertaTokenizer`` and fast ``DebertaTokenizerFast``).

NOTE(review): local variables in this file were mangled to the single
placeholder ``lowerCAmelCase__``; several statements below still read the
intended original names (e.g. ``tokd``, ``tokens``, ``tokenizer``), and all
test methods share the name ``snake_case`` (later defs shadow earlier ones).
Restore the original names before running.
"""
# Class under test (slow), rust-tokenizer flag, class under test (fast).
__magic_name__ :Optional[Any] = DebertaTokenizer
__magic_name__ :str = True
__magic_name__ :Dict = DebertaTokenizerFast
def snake_case ( self ):
'''Write a tiny BPE vocab and merges file into the test temp dir.'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ :Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
lowerCAmelCase__ :Union[str, Any] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ :Dict = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCAmelCase__ :int = {'unk_token': '[UNK]'}
lowerCAmelCase__ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__UpperCAmelCase ) )
def snake_case ( self , **__UpperCAmelCase ):
'''Instantiate a tokenizer from the temp-dir fixture files.'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''Return an (input_text, expected_output_text) pair for round-trip tests.'''
lowerCAmelCase__ :Optional[Any] = 'lower newer'
lowerCAmelCase__ :List[str] = 'lower newer'
return input_text, output_text
def snake_case ( self ):
'''Tokenize a simple string and check tokens and their ids.'''
lowerCAmelCase__ :Dict = self.get_tokenizer()
lowerCAmelCase__ :List[Any] = 'lower newer'
lowerCAmelCase__ :Optional[int] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCAmelCase__ :Tuple = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase__ :str = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self ):
'''Check token_type_ids produced for a sentence pair.'''
lowerCAmelCase__ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ :Optional[int] = tokenizer('Hello' , 'World' )
lowerCAmelCase__ :List[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
# NOTE(review): `tokd` below is the intended name of the encoding above.
self.assertListEqual(tokd['token_type_ids'] , __UpperCAmelCase )
@slow
def snake_case ( self ):
'''Check build_inputs_with_special_tokens matches encode with specials.'''
lowerCAmelCase__ :Any = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowerCAmelCase__ :Optional[Any] = tokenizer.encode('sequence builders' , add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :str = tokenizer.encode('multi-sequence build' , add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ :Any = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
lowerCAmelCase__ :str = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self ):
'''Integration test: full encoding of reference sentences against pinned ids/masks.'''
lowerCAmelCase__ :Any = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCAmelCase__ :Tuple = tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowerCAmelCase__ :Optional[int] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
lowerCAmelCase__ :Optional[int] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = [tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) for seq in encoding['input_ids']]
# fmt: off
lowerCAmelCase__ :Tuple = {
'input_ids': [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCAmelCase__ :List[Any] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , __UpperCAmelCase )
for expected, decoded in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 93 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__A = logging.get_logger(__name__)
__A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__A = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
__A = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class _lowerCAmelCase ( a ):
"""Fast (Rust-backed) RoBERTa tokenizer built on byte-level BPE.

NOTE(review): local variables were mangled to the placeholder
``lowerCAmelCase__``; several statements below read the intended original
names (``pre_tok_state``, ``state``, ``changes_to_apply``, ``output``,
``cls``/``sep``), so this class will not run as written. The property at
the bottom is also named ``snake_case`` while its setter decorator
references ``mask_token`` — in the original code the property is
``mask_token``.
"""
# Standard fast-tokenizer class attributes: file names, pretrained maps,
# max model input sizes, model input names, and the matching slow class.
__magic_name__ :str = VOCAB_FILES_NAMES
__magic_name__ :List[Any] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ :str = ["""input_ids""", """attention_mask"""]
__magic_name__ :Any = RobertaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , __UpperCAmelCase=True , **__UpperCAmelCase , ):
'''Initialize the backend tokenizer and reconcile add_prefix_space / trim_offsets
between the serialized tokenizer state and the constructor arguments.'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , )
# Rebuild the pre-tokenizer if its serialized add_prefix_space disagrees
# with the requested value.
lowerCAmelCase__ :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
lowerCAmelCase__ :Optional[int] = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
lowerCAmelCase__ :List[Any] = add_prefix_space
lowerCAmelCase__ :str = pre_tok_class(**__UpperCAmelCase )
lowerCAmelCase__ :List[str] = add_prefix_space
# Same reconciliation for the post-processor component.
lowerCAmelCase__ :str = 'post_processor'
lowerCAmelCase__ :Optional[Any] = getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
if tokenizer_component_instance:
lowerCAmelCase__ :Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase__ :Any = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase__ :int = tuple(state['cls'] )
lowerCAmelCase__ :List[Any] = False
if state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
lowerCAmelCase__ :Union[str, Any] = add_prefix_space
lowerCAmelCase__ :Any = True
if state.get('trim_offsets' , __UpperCAmelCase ) != trim_offsets:
lowerCAmelCase__ :Union[str, Any] = trim_offsets
lowerCAmelCase__ :Optional[int] = True
if changes_to_apply:
lowerCAmelCase__ :str = getattr(__UpperCAmelCase , state.pop('type' ) )
lowerCAmelCase__ :Any = component_class(**__UpperCAmelCase )
setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
@property
def snake_case ( self ):
'''Return the mask token string, or None (with an error log) if unset.'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case ( self , __UpperCAmelCase ):
'''Set the mask token; plain strings become lstrip AddedTokens.'''
lowerCAmelCase__ :List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else value
lowerCAmelCase__ :List[str] = value
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''Batch encode; pretokenized input requires add_prefix_space=True.'''
lowerCAmelCase__ :Optional[Any] = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''Encode a single example; pretokenized input requires add_prefix_space=True.'''
lowerCAmelCase__ :Any = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''Save the backend model files and return the written paths as a tuple.'''
lowerCAmelCase__ :Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
'''Build <s> A </s> (or <s> A </s></s> B </s>) input sequences.'''
lowerCAmelCase__ :str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''Return all-zero token type ids (RoBERTa does not use segment ids).'''
lowerCAmelCase__ :List[Any] = [self.sep_token_id]
lowerCAmelCase__ :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 93 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure for the Falcon model package.
# NOTE(review): the import structure dict and the conditional model list are
# both bound to `__A`, but `_LazyModule` below is called with the name
# `_import_structure` — in the original code these were
# `_import_structure = {...}` and `_import_structure["modeling_falcon"] = [...]`.
__A = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
# Only register the modeling symbols when torch is installed.
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
# Under static type checking, import everything eagerly so names resolve.
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
# At runtime, replace this module with a lazy loader.
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number):
    """Return True if ``number`` is prime, using 6k±1 trial division.

    Fix over the original block: the function was defined as ``__A`` but
    invoked as ``is_prime`` elsewhere in this module.

    Args:
        number: integer to test (negatives, 0 and 1 return False).
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes above 3 are of the form 6k +/- 1, so only test those divisors.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
# Precomputed search space for Project Euler 46: all odd composite numbers
# below 100001, ascending. Fix: the list was bound to `__A` while
# compute_nums below reads it as `odd_composites`.
odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums(n):
    """Return the first ``n`` odd composites that cannot be written as
    prime + 2 * square (Goldbach's "other" conjecture counterexamples).

    Fixes over the original block: the function was defined as ``__A`` but
    called as ``compute_nums``; the accumulator ``list_nums``, the counter
    ``i`` and the remainder were bound to placeholders and then read under
    their intended names (NameError).

    Args:
        n: how many counterexamples to collect; must be a positive int.

    Raises:
        ValueError: if ``n`` is not an int or ``n <= 0``.

    Returns:
        A list of ``n`` counterexamples, or [] if the search space is exhausted.
    """
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        # Try every decomposition odd_composite = prime + 2*i*i.
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # No decomposition found: this composite violates the conjecture.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution():
    """Return the Project Euler 46 answer: the smallest odd composite that is
    not the sum of a prime and twice a square.

    Fix over the original block: the function was defined as ``__A`` but
    invoked as ``solution()`` in the main guard below.
    """
    return compute_nums(1)[0]
if __name__ == "__main__":
# Print the Project Euler 46 answer when run as a script.
print(F'''{solution() = }''')
| 93 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"""vocab_file""": """sentencepiece.bpe.model"""}
__A = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
__A = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
__A = """▁"""
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :str = VOCAB_FILES_NAMES
__magic_name__ :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ :Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
lowerCAmelCase__ :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
lowerCAmelCase__ :Dict = vocab_file
lowerCAmelCase__ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
lowerCAmelCase__ :Optional[int] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
lowerCAmelCase__ :Tuple = len(self.sp_model ) - 1
lowerCAmelCase__ :Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ :int = [self.cls_token_id]
lowerCAmelCase__ :Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self ):
'''simple docstring'''
return len(self.sp_model )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ :Any = self.sp_model.PieceToId(__UpperCAmelCase )
return spm_id if spm_id else self.unk_token_id
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase ):
        """Convert a sequence of tokens back into a single string.

        Special tokens are emitted verbatim; runs of ordinary sub-word pieces
        are decoded through the SentencePiece model.

        NOTE(review): the obfuscated assignments below all bind the same
        throwaway name, so the references ``tokens``, ``prev_is_special``,
        ``out_string`` and ``current_sub_tokens`` are never actually bound —
        confirm against the upstream tokenizer implementation before relying
        on this method.
        """
        lowerCAmelCase__ :Any = []
        lowerCAmelCase__ :str = ''
        lowerCAmelCase__ :Dict = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                # presumably decodes the accumulated ordinary pieces, then
                # appends the special token as-is — TODO confirm which value
                # the obfuscated argument refers to
                out_string += self.sp_model.decode(__UpperCAmelCase ) + token
                lowerCAmelCase__ :List[str] = True
                lowerCAmelCase__ :List[Any] = []
            else:
                current_sub_tokens.append(__UpperCAmelCase )
                lowerCAmelCase__ :Union[str, Any] = False
        # flush any trailing ordinary pieces
        out_string += self.sp_model.decode(__UpperCAmelCase )
        return out_string.strip()
def __getstate__( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.__dict__.copy()
lowerCAmelCase__ :List[str] = None
return state
def __setstate__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase__ :List[Any] = {}
lowerCAmelCase__ :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase__ :Tuple = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , 'wb' ) as fi:
lowerCAmelCase__ :List[Any] = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 93 |
"""simple docstring"""
import re
def __A (_SCREAMING_SNAKE_CASE ) ->list:
    """Split the input on any non-alphanumeric separator, then split each
    fragment into its words.

    Fixes the original, which referenced an undefined name ``str_`` instead
    of its own parameter.
    """
    return [fragment.split() for fragment in re.split(r'[^ a-z A-Z 0-9 \s]' , str(_SCREAMING_SNAKE_CASE ) )]
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Return the input in PascalCase with all separators removed.

    The helper the original called (``split_input``) does not exist under
    this file's ``__A`` naming (NameError), so the split is inlined.
    """
    string_split = [fragment.split() for fragment in re.split(r'[^ a-z A-Z 0-9 \s]' , str(_SCREAMING_SNAKE_CASE ) )]
    return "".join(
        [''.join([word.capitalize() for word in sub_str] ) for sub_str in string_split] )
def __A (text , upper , separator ) ->str:
    """Join the words of ``text`` with ``separator``, fully upper-cased when
    ``upper`` is truthy, otherwise fully lower-cased.

    Restores three distinct parameter names (the original duplicated one
    obfuscated name, a SyntaxError) and inlines the missing ``split_input``
    helper.
    """
    try:
        string_split = [fragment.split() for fragment in re.split(r'[^ a-z A-Z 0-9 \s]' , str(text ) )]
        if upper:
            res_str = ''.join(
                [separator.join([word.upper() for word in sub_str] ) for sub_str in string_split] )
        else:
            res_str = ''.join(
                [separator.join([word.lower() for word in sub_str] ) for sub_str in string_split] )
        return res_str
    except IndexError:
        return "not valid string"
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Return the input in PascalCase.

    The ``to_simple_case`` helper the original delegated to does not exist
    under this file's ``__A`` naming (NameError), so its logic is inlined.
    """
    string_split = [fragment.split() for fragment in re.split(r'[^ a-z A-Z 0-9 \s]' , str(_SCREAMING_SNAKE_CASE ) )]
    return "".join(
        [''.join([word.capitalize() for word in sub_str] ) for sub_str in string_split] )
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Return the input in camelCase: PascalCase with the first character
    lower-cased; ``"not valid string"`` for empty input.

    Inlines the missing ``to_simple_case`` helper (NameError under this
    file's ``__A`` naming).
    """
    try:
        string_split = [fragment.split() for fragment in re.split(r'[^ a-z A-Z 0-9 \s]' , str(_SCREAMING_SNAKE_CASE ) )]
        res_str = "".join(
            [''.join([word.capitalize() for word in sub_str] ) for sub_str in string_split] )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def __A (text , upper ) ->str:
    """snake_case the input by delegating to the complex-case converter with
    ``'_'`` as the separator.

    Restores distinct parameter names (the original duplicated one name, a
    SyntaxError). NOTE(review): ``to_complex_case`` is not defined under
    this file's ``__A`` naming — confirm the intended callee.
    """
    return to_complex_case(text , upper , '_' )
def __A (text , upper ) ->str:
    """kebab-case the input by delegating to the complex-case converter with
    ``'-'`` as the separator.

    Restores distinct parameter names (the original duplicated one name, a
    SyntaxError). NOTE(review): ``to_complex_case`` is not defined under
    this file's ``__A`` naming — confirm the intended callee.
    """
    return to_complex_case(text , upper , '-' )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    __import__("""doctest""").testmod()
| 93 | 1 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->int:
    """Return the factorial of a non-negative integer.

    Implemented iteratively: the original recursed through a name that does
    not exist under this file's ``__A`` naming (NameError). 0! and 1! are 1,
    matching the original's base case.
    """
    result = 1
    for k in range(2 , _SCREAMING_SNAKE_CASE + 1 ):
        result *= k
    return result
def __A (_SCREAMING_SNAKE_CASE ) ->bool:
    """Return True iff the number is a Krishnamurthy number, i.e. it equals
    the sum of the factorials of its digits (e.g. 145 = 1! + 4! + 5!).

    Fixes the original, whose accumulator and digit variables were bound to
    throwaway locals and whose ``factorial`` callee does not exist under this
    file's ``__A`` naming.
    """
    from math import factorial  # local import: the file has no top-level `math` import

    fact_sum = 0
    duplicate = _SCREAMING_SNAKE_CASE
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Interactive driver: read a number and report whether it is a
    # Krishnamurthy number.
    # NOTE(review): `number` and `krishnamurthy` are not bound under this
    # file's naming (the input is assigned to `__A` and the checker is also
    # named `__A`) — confirm against the upstream script.
    print("""Program to check whether a number is a Krisnamurthy Number or not.""")
    __A = int(input("""Enter number: """).strip())
    print(
        F'''{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'''
    )
| 93 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
# Twitter API credentials, to be filled in by the user.
# NOTE(review): all four assignments bind the SAME name `__A`, so only the
# last value survives — upstream these are four distinct names
# (consumer key/secret, access key/secret); confirm before use.
__A = """"""
__A = """"""
__A = """"""
__A = """"""
def __A (_SCREAMING_SNAKE_CASE ) ->None:
    """Download a user's recent tweets via the Twitter API and write them to
    ``new_<screen_name>_tweets.csv``.

    NOTE(review): the obfuscated assignments below bind a single throwaway
    name, so the references ``auth``, ``api``, ``alltweets``, ``oldest``,
    ``screen_name`` and ``writer`` are never actually bound — confirm
    against the upstream script before running.
    """
    lowerCAmelCase__ :Any = tweepy.OAuthHandler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    auth.set_access_token(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :List[Any] = tweepy.API(_SCREAMING_SNAKE_CASE )
    # initialize a list to hold all the tweepy Tweets
    lowerCAmelCase__ :Union[str, Any] = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    lowerCAmelCase__ :Optional[Any] = api.user_timeline(screen_name=_SCREAMING_SNAKE_CASE , count=200 )
    # save most recent tweets
    alltweets.extend(_SCREAMING_SNAKE_CASE )
    # save the id of the oldest tweet less one
    lowerCAmelCase__ :Tuple = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(_SCREAMING_SNAKE_CASE ) > 0:
        print(F"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        lowerCAmelCase__ :Union[str, Any] = api.user_timeline(
            screen_name=_SCREAMING_SNAKE_CASE , count=200 , max_id=_SCREAMING_SNAKE_CASE )
        # save most recent tweets
        alltweets.extend(_SCREAMING_SNAKE_CASE )
        # update the id of the oldest tweet less one
        lowerCAmelCase__ :Tuple = alltweets[-1].id - 1
        print(F"...{len(_SCREAMING_SNAKE_CASE )} tweets downloaded so far" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    lowerCAmelCase__ :Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
        lowerCAmelCase__ :List[str] = csv.writer(_SCREAMING_SNAKE_CASE )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # pass in the username of the account you want to download
    # NOTE(review): `get_all_tweets` is not defined under this file's naming
    # (the downloader above is named `__A`) — confirm against upstream.
    get_all_tweets("""FirePing32""")
| 93 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import wiring for the vision-text dual-encoder package.
# Fixes the original, which bound every structure to the single name `__A`
# (each assignment overwriting the last) and then passed an undefined
# `_import_structure` to `_LazyModule` (NameError at import time).
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

# Backend-specific model classes are only registered when their backend is
# importable; otherwise the entry is silently omitted.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 93 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _lowerCAmelCase ( a , a , a , unittest.TestCase ):
    """Fast (CPU-sized) test suite for `StableUnCLIPPipeline`.

    NOTE(review): the obfuscated assignments in these methods bind one
    throwaway name each, so references such as `embedder_hidden_size`,
    `embedder_projection_dim`, `components`, `generator` and `inputs` are
    never actually bound — confirm against the upstream diffusers tests.
    """

    # All class attributes below were obfuscated to the same name
    # `__magic_name__`; upstream they are pipeline_class / params /
    # batch_params / image_params / image_latents_params.
    __magic_name__ :int = StableUnCLIPPipeline
    __magic_name__ :int = TEXT_TO_IMAGE_PARAMS
    __magic_name__ :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
    __magic_name__ :Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    __magic_name__ :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    __magic_name__ :List[str] = False
    def snake_case ( self ):
        """Build a dictionary of tiny pipeline components (prior, noising and
        denoising stages) with fixed seeds for determinism."""
        lowerCAmelCase__ :Dict = 3_2
        lowerCAmelCase__ :List[Any] = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        lowerCAmelCase__ :Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Any = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=__UpperCAmelCase , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
        torch.manual_seed(0 )
        lowerCAmelCase__ :int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=__UpperCAmelCase , num_layers=1 , )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Optional[int] = DDPMScheduler(
            variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=__UpperCAmelCase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCAmelCase__ :Dict = StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
        torch.manual_seed(0 )
        lowerCAmelCase__ :List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        lowerCAmelCase__ :str = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Optional[int] = UNetaDConditionModel(
            sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__UpperCAmelCase , layers_per_block=1 , upcast_attention=__UpperCAmelCase , use_linear_projection=__UpperCAmelCase , )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Tuple = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        lowerCAmelCase__ :Optional[int] = AutoencoderKL()
        lowerCAmelCase__ :Optional[int] = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
        """Build deterministic dummy pipeline inputs for the given device and
        seed (mps needs a CPU-seeded default generator)."""
        if str(__UpperCAmelCase ).startswith('mps' ):
            lowerCAmelCase__ :str = torch.manual_seed(__UpperCAmelCase )
        else:
            lowerCAmelCase__ :Optional[int] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def snake_case ( self ):
        """Attention slicing must not change outputs (exact match on CPU only)."""
        lowerCAmelCase__ :Optional[int] = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase )
    def snake_case ( self ):
        """Batched-single inference must match unbatched (exact on cpu/mps)."""
        lowerCAmelCase__ :str = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow, GPU-only integration tests for `StableUnCLIPPipeline`.

    NOTE(review): the obfuscated assignments bind throwaway names, so
    references such as `pipe`, `output`, `image` and `mem_bytes` are never
    actually bound — confirm against the upstream diffusers tests.
    """

    def snake_case ( self ):
        """Free Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case ( self ):
        """End-to-end text-to-image run compared against a reference image."""
        lowerCAmelCase__ :Tuple = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
        lowerCAmelCase__ :List[Any] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCAmelCase__ :List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
        lowerCAmelCase__ :List[str] = pipe('anime turle' , generator=__UpperCAmelCase , output_type='np' )
        lowerCAmelCase__ :Union[str, Any] = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
    def snake_case ( self ):
        """Peak GPU memory with offload + slicing must stay under 7 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCAmelCase__ :int = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
        lowerCAmelCase__ :List[str] = pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCAmelCase__ :Tuple = pipe(
            'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
        lowerCAmelCase__ :str = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 1_0**9
| 93 | 1 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Convert an integer to its ``0b``/``-0b``-prefixed binary string.

    Restores the broken obfuscated body: the ``isinstance`` guards compared
    the value against itself, and the sign flag, working copy and digit list
    were bound to throwaway locals.

    Raises:
        TypeError: if the input is a float or a str.
    """
    if isinstance(_SCREAMING_SNAKE_CASE , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if isinstance(_SCREAMING_SNAKE_CASE , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if _SCREAMING_SNAKE_CASE == 0:
        return "0b0"
    negative = False
    num = _SCREAMING_SNAKE_CASE
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        # collect bits most-significant first
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 93 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module-level logger and the model types that support an LM head.
# NOTE(review): all three assignments bind the SAME name `__A` (upstream:
# `logger`, `MODEL_CONFIG_CLASSES`, `MODEL_TYPES`), and the generator on the
# last line references `MODEL_CONFIG_CLASSES`, which is never bound here.
__A = logging.getLogger(__name__)
__A = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _lowerCAmelCase :
    """Arguments pertaining to which model/config/tokenizer to fine-tune or
    train from scratch.

    NOTE(review): every field below was obfuscated to the same name
    `__magic_name__`, so later fields overwrite earlier ones (upstream:
    model_name_or_path, model_type, config_name, tokenizer_name, cache_dir).
    """

    __magic_name__ :Optional[str] = field(
        default=a , metadata={
            """help""": (
                """The model checkpoint for weights initialization. Leave None if you want to train a model from"""
                """ scratch."""
            )
        } , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(a )} , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class _lowerCAmelCase :
    """Arguments pertaining to the training/evaluation data and the language
    modeling objective.

    NOTE(review): every field below was obfuscated to the same name
    `__magic_name__`, so later fields overwrite earlier ones (upstream:
    train_data_file, train_data_files, eval_data_file, train_ref_file,
    eval_ref_file, line_by_line, mlm, whole_word_mask, mlm_probability,
    plm_probability, max_span_length, block_size, overwrite_cache).
    """

    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """The input training data file (a text file)."""} )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
            )
        } , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
    __magic_name__ :Optional[str] = field(
        default=a , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
    __magic_name__ :bool = field(default=a , metadata={"""help""": """Whether ot not to use whole word mask."""} )
    __magic_name__ :float = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    __magic_name__ :float = field(
        default=1 / 6 , metadata={
            """help""": (
                """Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
                """ modeling."""
            )
        } , )
    __magic_name__ :int = field(
        default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
    __magic_name__ :int = field(
        default=-1 , metadata={
            """help""": (
                """Optional input sequence length after tokenization."""
                """The training dataset will be truncated in block of this size for training."""
                """Default to the model max input length for single sentence inputs (take into account special tokens)."""
            )
        } , )
    __magic_name__ :bool = field(
        default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __A (args , tokenizer , evaluate = False , cache_dir = None , ) ->Optional[int]:
    """Build the train or eval dataset(s) described by ``args``.

    Restores four distinct parameter names (the original duplicated one
    obfuscated name, a SyntaxError) and passes each globbed file ``f`` to the
    inner helper (the original passed the glob pattern itself).

    Args:
        args: a ``DataTrainingArguments`` instance.
        tokenizer: the tokenizer used by the dataset classes.
        evaluate: build the evaluation dataset instead of the training one.
        cache_dir: optional cache directory for ``TextDataset``.
    """
    def _dataset(file_path , ref_path=None ):
        # Wrap a single text file in the dataset class selected by `args`.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def __A () ->List[Any]:
    """Entry point for language-model fine-tuning: parse CLI arguments, load
    config/tokenizer/model, build datasets and a collator, then train and/or
    evaluate, returning the metrics dict.

    NOTE(review): the obfuscated assignments bind throwaway names, so
    references such as `parser`, `model_args`, `data_args`, `training_args`,
    `config`, `tokenizer`, `model`, `train_dataset`, `eval_dataset`,
    `data_collator`, `trainer`, `results`, `eval_output`, `perplexity`,
    `result` and `output_eval_file` are never actually bound — confirm
    against the upstream `run_language_modeling.py`.
    """
    lowerCAmelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = parser.parse_args_into_dataclasses()
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.' )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    # NOTE(review): `_SCREAMING_SNAKE_CASE` is not a parameter of this
    # zero-argument function; upstream this logs `training_args`.
    logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        lowerCAmelCase__ :Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        lowerCAmelCase__ :List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        lowerCAmelCase__ :List[Any] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.tokenizer_name:
        lowerCAmelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        lowerCAmelCase__ :str = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name' )
    if model_args.model_name_or_path:
        lowerCAmelCase__ :Optional[Any] = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
    else:
        logger.info('Training new model from scratch' )
        lowerCAmelCase__ :int = AutoModelWithLMHead.from_config(_SCREAMING_SNAKE_CASE )
    model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        lowerCAmelCase__ :Dict = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        lowerCAmelCase__ :Any = min(data_args.block_size , tokenizer.max_len )
    # Get datasets
    lowerCAmelCase__ :List[str] = (
        get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    lowerCAmelCase__ :Optional[int] = (
        get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , evaluate=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    # Pick the data collator matching the modeling objective (PLM for XLNet,
    # whole-word or token-level MLM otherwise).
    if config.model_type == "xlnet":
        lowerCAmelCase__ :str = DataCollatorForPermutationLanguageModeling(
            tokenizer=_SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            lowerCAmelCase__ :Optional[Any] = DataCollatorForWholeWordMask(
                tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
        else:
            lowerCAmelCase__ :str = DataCollatorForLanguageModeling(
                tokenizer=_SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    lowerCAmelCase__ :Tuple = Trainer(
        model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE , )
    # Training
    if training_args.do_train:
        lowerCAmelCase__ :Tuple = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=_SCREAMING_SNAKE_CASE )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    lowerCAmelCase__ :Optional[Any] = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        lowerCAmelCase__ :Any = trainer.evaluate()
        lowerCAmelCase__ :Optional[Any] = math.exp(eval_output['eval_loss'] )
        lowerCAmelCase__ :Dict = {'perplexity': perplexity}
        lowerCAmelCase__ :List[Any] = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
        if trainer.is_world_master():
            with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key in sorted(result.keys() ):
                    logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , str(result[key] ) )
                    writer.write('%s = %s\n' % (key, str(result[key] )) )
        results.update(_SCREAMING_SNAKE_CASE )
    return results
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]:
    """Entry point for TPU workers (`xla_spawn.py` passes a worker index).

    NOTE(review): `main` is not defined under this file's naming — the
    training entry point above is also named `__A`; confirm against the
    upstream script.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): same naming issue — `main` is unbound here.
    main()
| 93 | 1 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def __A (pred_path , tgt_path , save_path=None , **kwargs ):
    """Score the lines of ``pred_path`` against ``tgt_path`` with ROUGE and
    optionally persist the metrics as JSON.

    Restores distinct parameter names (the original duplicated one obfuscated
    name, a SyntaxError) and closes the input files deterministically.
    NOTE(review): ``calculate_rouge``/``save_json`` come from the local
    ``utils`` module — confirm the ``indent=None`` convention against it.
    """
    with open(pred_path ) as f:
        pred_lns = [x.strip() for x in f.readlines()]
    # The target file may be longer than the predictions; truncate to match.
    with open(tgt_path ) as f:
        tgt_lns = [x.strip() for x in f.readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
    # Expose the scorer as a CLI via python-fire.
    # NOTE(review): `calculate_rouge_path` is not defined under this file's
    # naming (the scorer above is named `__A`) — confirm against upstream.
    fire.Fire(calculate_rouge_path)
| 93 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( a ):
    """Output container for the temporal transformer model: holds a single
    FloatTensor field (upstream name: ``sample``)."""

    # obfuscated field name; upstream this is `sample: torch.FloatTensor`
    __magic_name__ :torch.FloatTensor
class _lowerCAmelCase ( a , a ):
    """Transformer applied along the temporal (frame) axis of a video latent.

    NOTE(review): the obfuscated assignments below bind throwaway names, so
    references such as `self.norm`, `self.proj_in`, `self.transformer_blocks`,
    `self.proj_out`, `batch_frames`, `batch_size`, `residual` and `output`
    are never actually bound — confirm against the upstream diffusers
    `TransformerTemporalModel`.
    """

    @register_to_config
    def __init__( self , __UpperCAmelCase = 1_6 , __UpperCAmelCase = 8_8 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = True , __UpperCAmelCase = True , ):
        """Build the group-norm -> linear -> transformer blocks -> linear stack
        (upstream parameters: num_attention_heads, attention_head_dim,
        in_channels, out_channels, num_layers, dropout, norm_num_groups,
        cross_attention_dim, attention_bias, sample_size, activation_fn,
        norm_elementwise_affine, double_self_attention)."""
        super().__init__()
        lowerCAmelCase__ :Dict = num_attention_heads
        lowerCAmelCase__ :Any = attention_head_dim
        lowerCAmelCase__ :Optional[int] = num_attention_heads * attention_head_dim
        lowerCAmelCase__ :Any = in_channels
        lowerCAmelCase__ :str = torch.nn.GroupNorm(num_groups=__UpperCAmelCase , num_channels=__UpperCAmelCase , eps=1E-6 , affine=__UpperCAmelCase )
        lowerCAmelCase__ :int = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
        # 3. Define transformers blocks
        lowerCAmelCase__ :List[Any] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dropout=__UpperCAmelCase , cross_attention_dim=__UpperCAmelCase , activation_fn=__UpperCAmelCase , attention_bias=__UpperCAmelCase , double_self_attention=__UpperCAmelCase , norm_elementwise_affine=__UpperCAmelCase , )
                for d in range(__UpperCAmelCase )
            ] )
        lowerCAmelCase__ :List[Any] = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase = True , ):
        """Forward pass: reshape (batch*frames, C, H, W) so attention runs
        across frames per spatial location, apply the transformer blocks,
        undo the reshape and add the residual. Returns a tuple when
        `return_dict` is falsy, else a `TransformerTemporalModelOutput`."""
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = hidden_states.shape
        lowerCAmelCase__ :Tuple = batch_frames // num_frames
        lowerCAmelCase__ :str = hidden_states
        lowerCAmelCase__ :Union[str, Any] = hidden_states[None, :].reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :str = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        lowerCAmelCase__ :Optional[int] = self.norm(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = self.proj_in(__UpperCAmelCase )
        # 2. Blocks
        for block in self.transformer_blocks:
            lowerCAmelCase__ :Optional[int] = block(
                __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase , cross_attention_kwargs=__UpperCAmelCase , class_labels=__UpperCAmelCase , )
        # 3. Output
        lowerCAmelCase__ :Any = self.proj_out(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = (
            hidden_states[None, None, :]
            .reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        lowerCAmelCase__ :Optional[Any] = hidden_states.reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=__UpperCAmelCase )
| 93 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """CLIPSeg processor tests: save/load round-trips and tokenizer / image-processor parity.

    NOTE(review): this file is machine-degenerated and cannot run as-is:
      * multi-parameter signatures reuse the name ``__UpperCAmelCase``
        (SyntaxError: duplicate argument name);
      * ``lowerCAmelCase__ :T = value`` binds a throwaway local where the
        original assigned a named local or ``self.`` attribute — e.g. the
        first method binds the temp-dir path to a local while later lines
        read ``self.tmpdirname`` / ``self.vocab_file`` / ``self.merges_file``
        / ``self.image_processor_file``, which are never set;
      * every method is named ``snake_case``, so each definition shadows the
        previous one (originally distinct names such as ``setUp`` /
        ``tearDown`` / ``get_tokenizer``, judging by the bodies).
    """
    def snake_case ( self ):
        '''Write a toy CLIP vocab/merges pair and an image-processor config into a fresh temp dir.'''
        lowerCAmelCase__ :Dict = tempfile.mkdtemp()
        # fmt: off
        lowerCAmelCase__ :List[str] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        lowerCAmelCase__ :Tuple = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
        lowerCAmelCase__ :int = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        lowerCAmelCase__ :List[Any] = {'unk_token': '<unk>'}
        lowerCAmelCase__ :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowerCAmelCase__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(__UpperCAmelCase ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(__UpperCAmelCase ) )
        lowerCAmelCase__ :str = {
            'do_resize': True,
            'size': 2_0,
            'do_center_crop': True,
            'crop_size': 1_8,
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        lowerCAmelCase__ :Optional[Any] = os.path.join(self.tmpdirname , __UpperCAmelCase )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(__UpperCAmelCase , __UpperCAmelCase )
    def snake_case ( self , **__UpperCAmelCase ):
        '''Load the slow CLIP tokenizer from the temp dir.'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
    def snake_case ( self , **__UpperCAmelCase ):
        '''Load the fast (Rust) CLIP tokenizer from the temp dir.'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
    def snake_case ( self , **__UpperCAmelCase ):
        '''Load the image processor from the temp dir.'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
    def snake_case ( self ):
        '''Delete the temp dir.'''
        shutil.rmtree(self.tmpdirname )
    def snake_case ( self ):
        '''Create one random 3x30x400 image as a PIL Image.'''
        lowerCAmelCase__ :Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        lowerCAmelCase__ :Optional[Any] = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case ( self ):
        '''save_pretrained/from_pretrained round-trip with both slow and fast tokenizers.'''
        lowerCAmelCase__ :Optional[Any] = self.get_tokenizer()
        lowerCAmelCase__ :str = self.get_rust_tokenizer()
        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :Optional[Any] = CLIPSegProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ :Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
        lowerCAmelCase__ :int = CLIPSegProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ :List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
    def snake_case ( self ):
        '''Round-trip with extra kwargs (new special tokens, processor settings).'''
        lowerCAmelCase__ :Tuple = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ :str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        lowerCAmelCase__ :Optional[Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
        lowerCAmelCase__ :str = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
    def snake_case ( self ):
        '''Processor image path must match calling the image processor directly.'''
        lowerCAmelCase__ :List[str] = self.get_image_processor()
        lowerCAmelCase__ :Union[str, Any] = self.get_tokenizer()
        lowerCAmelCase__ :Tuple = CLIPSegProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ :Any = self.prepare_image_inputs()
        lowerCAmelCase__ :int = image_processor(__UpperCAmelCase , return_tensors='np' )
        lowerCAmelCase__ :Optional[Any] = processor(images=__UpperCAmelCase , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def snake_case ( self ):
        '''Processor text path must match calling the tokenizer directly.'''
        lowerCAmelCase__ :Union[str, Any] = self.get_image_processor()
        lowerCAmelCase__ :Dict = self.get_tokenizer()
        lowerCAmelCase__ :Tuple = CLIPSegProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ :Any = 'lower newer'
        lowerCAmelCase__ :Any = processor(text=__UpperCAmelCase )
        lowerCAmelCase__ :int = tokenizer(__UpperCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def snake_case ( self ):
        '''Text + image call yields input_ids/attention_mask/pixel_values; empty call raises.'''
        lowerCAmelCase__ :Union[str, Any] = self.get_image_processor()
        lowerCAmelCase__ :int = self.get_tokenizer()
        lowerCAmelCase__ :Any = CLIPSegProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = 'lower newer'
        lowerCAmelCase__ :Tuple = self.prepare_image_inputs()
        lowerCAmelCase__ :Any = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()
    def snake_case ( self ):
        '''Image + visual-prompt call yields both pixel tensors; empty call raises.'''
        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :Union[str, Any] = self.get_tokenizer()
        lowerCAmelCase__ :Any = CLIPSegProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = self.prepare_image_inputs()
        lowerCAmelCase__ :int = self.prepare_image_inputs()
        lowerCAmelCase__ :Dict = processor(images=__UpperCAmelCase , visual_prompt=__UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()
    def snake_case ( self ):
        '''batch_decode must delegate to the tokenizer.'''
        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :Any = self.get_tokenizer()
        lowerCAmelCase__ :Dict = CLIPSegProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
        lowerCAmelCase__ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCAmelCase__ :List[Any] = processor.batch_decode(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = tokenizer.batch_decode(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 93 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
    """Tester fixture: holds image-processor settings and computes expected output sizes.

    NOTE(review): machine-degenerated. Before this can work:
      * ``__init__``'s parameters all share the name ``__UpperCAmelCase``
        (SyntaxError: duplicate argument name);
      * each ``lowerCAmelCase__ :T = value`` line binds a throwaway local
        where the original stored an instance attribute — the other methods
        read ``self.do_resize``, ``self.size``, ``self.image_mean`` etc.,
        which are never set;
      * both remaining methods are named ``snake_case``, so the second
        shadows the first, while the test class below calls them as
        ``prepare_image_processor_dict()`` and ``get_expected_values()``.
    """
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 2_5_5 , __UpperCAmelCase=True , ):
        '''Record the synthetic-image and processor settings used by the tests.'''
        # default: resize shortest edge to 18, cap longest edge at 1333
        lowerCAmelCase__ :Any = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        lowerCAmelCase__ :List[Any] = parent
        lowerCAmelCase__ :int = batch_size
        lowerCAmelCase__ :Union[str, Any] = num_channels
        lowerCAmelCase__ :Any = min_resolution
        lowerCAmelCase__ :Dict = max_resolution
        lowerCAmelCase__ :Dict = do_resize
        lowerCAmelCase__ :Optional[Any] = size
        lowerCAmelCase__ :List[str] = do_normalize
        lowerCAmelCase__ :str = image_mean
        lowerCAmelCase__ :Tuple = image_std
        lowerCAmelCase__ :Dict = do_rescale
        lowerCAmelCase__ :Tuple = rescale_factor
        lowerCAmelCase__ :Optional[int] = do_pad
    def snake_case ( self ):
        '''Return the kwargs dict used to construct the image processor under test.'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
        '''Compute the (height, width) the processor should output for the given image(s).'''
        if not batched:
            lowerCAmelCase__ :str = image_inputs[0]
            if isinstance(__UpperCAmelCase , Image.Image ):
                lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = image.size
            else:
                lowerCAmelCase__ , lowerCAmelCase__ :str = image.shape[1], image.shape[2]
            # shortest-edge resize keeps the aspect ratio: scale the longer side proportionally
            if w < h:
                lowerCAmelCase__ :int = int(self.size['shortest_edge'] * h / w )
                lowerCAmelCase__ :List[str] = self.size['shortest_edge']
            elif w > h:
                lowerCAmelCase__ :Union[str, Any] = self.size['shortest_edge']
                lowerCAmelCase__ :Any = int(self.size['shortest_edge'] * w / h )
            else:
                lowerCAmelCase__ :int = self.size['shortest_edge']
                lowerCAmelCase__ :Union[str, Any] = self.size['shortest_edge']
        else:
            # batched: per-image expected sizes, then the batch maximum per axis (padding)
            lowerCAmelCase__ :Optional[Any] = []
            for image in image_inputs:
                lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowerCAmelCase__ :List[str] = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0]
            lowerCAmelCase__ :List[Any] = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( a , unittest.TestCase ):
    """DetaImageProcessor tests: properties, pixel-value shapes, and COCO annotation encoding.

    NOTE(review): machine-degenerated; to run it needs the same repairs as the
    rest of the file: results bound to throwaway ``lowerCAmelCase__`` locals
    are later read via ``self.image_processor_tester`` (never set), the base
    class ``a`` and the tester name ``DetaImageProcessingTester`` are not
    defined in this module, and all test methods share the name
    ``snake_case`` so only the last definition survives on the class.
    """
    # class-under-test handle (originally `image_processing_class`, which the
    # methods below read via self — see class note)
    __magic_name__ :Any = DetaImageProcessor if is_vision_available() else None
    def snake_case ( self ):
        '''Build the shared tester fixture.'''
        lowerCAmelCase__ :int = DetaImageProcessingTester(self )
    @property
    def snake_case ( self ):
        '''Kwargs dict for constructing the processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def snake_case ( self ):
        '''The processor must expose all its configuration attributes.'''
        lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_rescale' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'do_pad' ) )
        self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )
    def snake_case ( self ):
        '''from_dict must apply the default size and padding settings.'''
        lowerCAmelCase__ :Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
    def snake_case ( self ):
        '''Intentionally skipped.'''
        pass
    def snake_case ( self ):
        '''PIL inputs: check output pixel-value shapes, unbatched and batched.'''
        lowerCAmelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , Image.Image )
        # Test not batched input
        lowerCAmelCase__ :Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def snake_case ( self ):
        '''NumPy inputs: check output pixel-value shapes, unbatched and batched.'''
        lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase__ :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , np.ndarray )
        # Test not batched input
        lowerCAmelCase__ :List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ :Tuple = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def snake_case ( self ):
        '''Torch-tensor inputs: check output pixel-value shapes, unbatched and batched.'''
        lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
        # Test not batched input
        lowerCAmelCase__ :Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowerCAmelCase__ :str = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
        lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def snake_case ( self ):
        '''COCO detection annotations: verify every encoded target tensor against golden values.'''
        lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            lowerCAmelCase__ :Dict = json.loads(f.read() )
        lowerCAmelCase__ :int = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        lowerCAmelCase__ :int = DetaImageProcessor()
        lowerCAmelCase__ :List[Any] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='pt' )
        # verify pixel values
        lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
        # verify area
        lowerCAmelCase__ :Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
        # verify boxes
        lowerCAmelCase__ :Tuple = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
        # verify image_id
        lowerCAmelCase__ :Any = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
        # verify is_crowd
        lowerCAmelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
        # verify class_labels
        lowerCAmelCase__ :Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
        # verify orig_size
        lowerCAmelCase__ :str = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
        # verify size
        lowerCAmelCase__ :Any = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
    @slow
    def snake_case ( self ):
        '''COCO panoptic annotations: verify encoded targets, including segmentation masks.'''
        lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            lowerCAmelCase__ :Dict = json.loads(f.read() )
        lowerCAmelCase__ :Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        lowerCAmelCase__ :Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        lowerCAmelCase__ :Dict = DetaImageProcessor(format='coco_panoptic' )
        lowerCAmelCase__ :Optional[int] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='pt' )
        # verify pixel values
        lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
        # verify area
        lowerCAmelCase__ :Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
        # verify boxes
        lowerCAmelCase__ :int = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
        # verify image_id
        lowerCAmelCase__ :Optional[int] = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
        # verify is_crowd
        lowerCAmelCase__ :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
        # verify class_labels
        lowerCAmelCase__ :List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
        # verify masks
        lowerCAmelCase__ :Optional[int] = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCAmelCase )
        # verify orig_size
        lowerCAmelCase__ :Optional[int] = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
        # verify size
        lowerCAmelCase__ :Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
| 93 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A = logging.get_logger(__name__)
# NOTE(review): ``__A`` is immediately rebound below, so the module logger
# obtained above is discarded — an artifact of the automated renaming applied
# to this file (originally two distinct module-level names).
# Map of released DeBERTa-v2 checkpoints to their hosted config files.
__A = {
    """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
    ),
    """microsoft/deberta-v2-xxlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
    ),
}
class _lowerCAmelCase ( PretrainedConfig ):
    """DeBERTa-v2 model configuration.

    NOTE(review): reconstructed from a degenerated original in which every
    ``__init__`` parameter shared one name (a SyntaxError), the assignments
    bound throwaway locals instead of instance attributes, and the base class
    was the undefined name ``a`` (``PretrainedConfig`` is imported above).
    Parameter/attribute names follow the standard DeBERTa-v2 configuration —
    confirm against downstream callers before shipping.
    """

    __magic_name__ :Union[str, Any] = """deberta-v2"""

    def __init__( self , vocab_size=1_2_8_1_0_0 , hidden_size=1_5_3_6 , num_hidden_layers=2_4 , num_attention_heads=2_4 , intermediate_size=6_1_4_4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1E-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        '''Store the hyper-parameters on the instance and forward extras to the base class.'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: accept "p2c|c2p"-style strings as well as lists
        if isinstance(pos_att_type , str ):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|' )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        # the pooler defaults to the encoder width unless overridden via kwargs
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size' , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class _lowerCAmelCase ( a ):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): degeneration damage to repair before use:
      * the base class ``a`` is not defined in this module (presumably
        ``OnnxConfig``, which is imported above — confirm);
      * all three members are named ``snake_case``, so the last definition
        shadows the two properties (originally distinct names — the
        ``super().generate_dummy_inputs`` call below suggests the last one
        was ``generate_dummy_inputs``);
      * the first property binds a throwaway local and then reads the
        undefined name ``dynamic_axis``;
      * the last method's parameters all share one name (SyntaxError) while
        the body reads ``preprocessor`` and ``framework``.
    """
    @property
    def snake_case ( self ):
        '''Dynamic-axis layout for the exported inputs (extra "choice" axis for multiple-choice).'''
        if self.task == "multiple-choice":
            lowerCAmelCase__ :str = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            lowerCAmelCase__ :Tuple = {0: 'batch', 1: 'sequence'}
        # token_type_ids is only exported when the config actually uses token types
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
    @property
    def snake_case ( self ):
        '''Returns 12 — presumably the default ONNX opset for this export; confirm original property name.'''
        return 1_2
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = 3 , __UpperCAmelCase = 4_0 , __UpperCAmelCase = 4_0 , __UpperCAmelCase = None , ):
        '''Build dummy inputs via the base class and drop token_type_ids when the config has no token types.'''
        lowerCAmelCase__ :Union[str, Any] = super().generate_dummy_inputs(preprocessor=__UpperCAmelCase , framework=__UpperCAmelCase )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 93 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Return the binary digits of a non-negative integer, most significant first.

    Accepts anything ``int()`` accepts (int, numeric str, ...); the result
    carries no ``0b`` prefix. Raises ValueError for negative values, on which
    the recursion could not terminate.

    Fixes: the degenerated original bound ``int``/``divmod`` results to
    throwaway locals and then read the undefined names ``decimal`` and
    ``binary_recursive``, so any call raised NameError.
    """
    decimal = int(_SCREAMING_SNAKE_CASE )
    if decimal < 0:
        raise ValueError('Input value must be non-negative' )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    # higher-order digits come from the recursive call; the current bit goes last
    return __A(div ) + str(mod )
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Convert an integer given as int or str to a signed ``0b``-prefixed binary string.

    Leading/trailing whitespace is ignored and a leading ``-`` is preserved in
    front of the prefix. Raises ValueError for empty or non-integer input.

    Fixes: the degenerated original bound its intermediates to throwaway
    locals, then read the undefined names ``number``/``negative`` and called
    the nonexistent helper ``binary_recursive``.
    """
    number = str(_SCREAMING_SNAKE_CASE ).strip()
    if not number:
        raise ValueError('No input value was provided' )
    # remember the sign, then validate the unsigned digit string
    negative = '-' if number.startswith('-' ) else ''
    number = number.lstrip('-' )
    if not number.isnumeric():
        raise ValueError('Input value is not an integer' )
    # the ``b`` format spec produces exactly the digit string the (now missing)
    # recursive helper used to build
    return F"{negative}0b{int(number ):b}"
if __name__ == "__main__":
    # run the module's doctests when executed directly
    from doctest import testmod
    testmod()
| 93 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
# Surface INFO-level messages from the transformers logging utilities.
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def __A (config , base_model=False ) -> list:
    """List the (old_key, new_key) pairs that map a timm ViT checkpoint onto the HF layout.

    ``config`` only needs ``num_hidden_layers``. When ``base_model`` is true
    the keys target a bare encoder: no ``vit.`` prefix and a pooler instead of
    a classification head.

    Fixes: the degenerated original gave both parameters the same name (a
    SyntaxError), bound the result list to a throwaway local and then appended
    to the undefined name ``rename_keys``, and its ``Optional[int]`` return
    annotation referenced an unimported name.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('cls_token', 'vit.embeddings.cls_token'),
            ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
            ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
            ('pos_embed', 'vit.embeddings.position_embeddings'),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('norm.weight', 'layernorm.weight'),
                ('norm.bias', 'layernorm.bias'),
                ('pre_logits.fc.weight', 'pooler.dense.weight'),
                ('pre_logits.fc.bias', 'pooler.dense.bias'),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('norm.weight', 'vit.layernorm.weight'),
                ('norm.bias', 'vit.layernorm.bias'),
                ('head.weight', 'classifier.weight'),
                ('head.bias', 'classifier.bias'),
            ] )
    return rename_keys
def __A (state_dict , config , base_model=False ) -> None:
    """Split each timm fused qkv projection into separate q/k/v tensors in *state_dict*.

    Mutates *state_dict* in place: pops ``blocks.{i}.attn.qkv.{weight,bias}``
    and writes the query/key/value slices back under HF-style keys.

    NOTE(review): reconstructed — the degenerated original gave all three
    parameters one name (a SyntaxError) and bound the popped tensors and every
    slice to throwaway locals (reading the undefined names ``in_proj_weight``
    / ``in_proj_bias``), so nothing was ever written back. The target key
    names below follow the standard HF ViT conversion layout — confirm
    against ``modeling_vit`` before shipping.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def __A (_SCREAMING_SNAKE_CASE ) -> None:
    """Remove the timm classification head weights from a state dict, in place.

    Fixes: the degenerated original bound the key list to a throwaway local
    and then read the undefined names ``ignore_keys`` and ``state_dict``.
    """
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        # pop with a default so a checkpoint without a head does not raise
        _SCREAMING_SNAKE_CASE.pop(k , None )
def __A (dct , old , new ) -> None:
    """Move ``dct[old]`` to ``dct[new]`` in place (raises KeyError if *old* is missing).

    Fixes: the degenerated original declared three parameters with the same
    name (a SyntaxError) and read the undefined names ``dct`` and ``val``.
    """
    val = dct.pop(old )
    dct[new] = val
def __A ():
    """Download and return the standard COCO test image used to sanity-check conversions.

    Fixes: the degenerated original was a zero-argument function whose body
    read the undefined names ``_SCREAMING_SNAKE_CASE`` and ``im``; its
    ``Union[str, Any]`` annotation also referenced an unimported name.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # stream=True lets PIL read straight from the response body
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
    """Convert a timm ViT/DeiT checkpoint to the HF format, verify it, and save it.

    NOTE(review): this body is machine-obfuscated. Both parameters share the name
    `_SCREAMING_SNAKE_CASE` (a SyntaxError in Python; presumably `vit_name` and
    `pytorch_dump_folder_path`), and every `lowerCAmelCase__` binding has lost its
    original target, so names read later (`vit_name`, `base_model`, `config`,
    `idalabel`, `timm_model`, `model`, `image_processor`, `encoding`, `outputs`,
    ...) are unbound. Left byte-identical pending recovery from the upstream
    timm-to-HF ViT conversion script — do not run as-is.
    """
    lowerCAmelCase__ :int = ViTConfig()
    lowerCAmelCase__ :Dict = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        lowerCAmelCase__ :Union[str, Any] = True
        lowerCAmelCase__ :Any = int(vit_name[-12:-10] )
        lowerCAmelCase__ :Any = int(vit_name[-9:-6] )
    else:
        # presumably sets num_labels / id2label / label2id and patch/image sizes — TODO confirm
        lowerCAmelCase__ :int = 1000
        lowerCAmelCase__ :Optional[Any] = 'huggingface/label-files'
        lowerCAmelCase__ :Optional[Any] = 'imagenet-1k-id2label.json'
        lowerCAmelCase__ :Tuple = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
        lowerCAmelCase__ :Dict = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
        lowerCAmelCase__ :Any = idalabel
        lowerCAmelCase__ :Optional[Any] = {v: k for k, v in idalabel.items()}
        lowerCAmelCase__ :List[Any] = int(vit_name[-6:-4] )
        lowerCAmelCase__ :Union[str, Any] = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            lowerCAmelCase__ :List[Any] = 192
            lowerCAmelCase__ :Dict = 768
            lowerCAmelCase__ :Union[str, Any] = 12
            lowerCAmelCase__ :List[Any] = 3
        elif vit_name[9:].startswith('small' ):
            lowerCAmelCase__ :int = 384
            lowerCAmelCase__ :Tuple = 1536
            lowerCAmelCase__ :List[Any] = 12
            lowerCAmelCase__ :List[Any] = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            lowerCAmelCase__ :Tuple = 768
            lowerCAmelCase__ :Dict = 2304
            lowerCAmelCase__ :Tuple = 8
            lowerCAmelCase__ :List[Any] = 8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            lowerCAmelCase__ :Optional[Any] = 1024
            lowerCAmelCase__ :Union[str, Any] = 4096
            lowerCAmelCase__ :List[Any] = 24
            lowerCAmelCase__ :Tuple = 16
        elif vit_name[4:].startswith('huge' ):
            lowerCAmelCase__ :Dict = 1280
            lowerCAmelCase__ :Union[str, Any] = 5120
            lowerCAmelCase__ :Union[str, Any] = 32
            lowerCAmelCase__ :Tuple = 16
    # load original model from timm
    lowerCAmelCase__ :List[str] = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    lowerCAmelCase__ :Any = timm_model.state_dict()
    if base_model:
        remove_classification_head_(_SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :Dict = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    for src, dest in rename_keys:
        rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        lowerCAmelCase__ :str = ViTModel(_SCREAMING_SNAKE_CASE ).eval()
    else:
        lowerCAmelCase__ :List[Any] = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
    model.load_state_dict(_SCREAMING_SNAKE_CASE )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        lowerCAmelCase__ :Optional[Any] = DeiTImageProcessor(size=config.image_size )
    else:
        lowerCAmelCase__ :str = ViTImageProcessor(size=config.image_size )
    lowerCAmelCase__ :List[str] = image_processor(images=prepare_img() , return_tensors='pt' )
    lowerCAmelCase__ :List[Any] = encoding['pixel_values']
    lowerCAmelCase__ :Any = model(_SCREAMING_SNAKE_CASE )
    if base_model:
        lowerCAmelCase__ :Dict = timm_model.forward_features(_SCREAMING_SNAKE_CASE )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1e-3 )
    else:
        lowerCAmelCase__ :Tuple = timm_model(_SCREAMING_SNAKE_CASE )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1e-3 )
    Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
    print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(_SCREAMING_SNAKE_CASE )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound both the parser and the parsed
    # args to `__A`, leaving `parser` and `args` (read below) undefined; restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_patch16_224",
        type=str,
        help="Name of the ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 93 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( a ):
    """Whisper processor wrapping a feature extractor and a tokenizer.

    NOTE(review): obfuscation artifacts in this class — both class attributes
    are named `__magic_name__` (the second overwrites the first; presumably
    `feature_extractor_class` / `tokenizer_class`), several methods share the
    name `snake_case` (later defs shadow earlier ones), and `__call__` reads
    `__UpperCAmelCase`, which is unbound there (presumably `None` defaults of
    the original `kwargs.pop` calls). Code left byte-identical.
    """
    __magic_name__ :Tuple = """WhisperFeatureExtractor"""
    __magic_name__ :Dict = """WhisperTokenizer"""
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
        '''Store the feature-extractor/tokenizer pair on the processor mixin.'''
        super().__init__(__UpperCAmelCase , __UpperCAmelCase )
        # NOTE(review): targets lost — presumably self.current_processor and
        # self._in_target_context_manager (both read in __call__ below).
        lowerCAmelCase__ :str = self.feature_extractor
        lowerCAmelCase__ :Any = False
    def snake_case ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ):
        '''Delegate to the tokenizer's get_decoder_prompt_ids.'''
        return self.tokenizer.get_decoder_prompt_ids(task=__UpperCAmelCase , language=__UpperCAmelCase , no_timestamps=__UpperCAmelCase )
    def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''Dispatch audio to the feature extractor and/or text to the tokenizer.'''
        if self._in_target_context_manager:
            return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = kwargs.pop('audio' , __UpperCAmelCase )
        lowerCAmelCase__ :int = kwargs.pop('sampling_rate' , __UpperCAmelCase )
        lowerCAmelCase__ :str = kwargs.pop('text' , __UpperCAmelCase )
        if len(__UpperCAmelCase ) > 0:
            lowerCAmelCase__ :int = args[0]
            lowerCAmelCase__ :Tuple = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            lowerCAmelCase__ :List[str] = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
        if text is not None:
            lowerCAmelCase__ :Optional[int] = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
        # Return only what was produced: features, encodings, or both merged.
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            lowerCAmelCase__ :Optional[int] = encodings['input_ids']
            return inputs
    def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''Delegate to tokenizer.batch_decode.'''
        return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
    def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''Delegate to tokenizer.decode.'''
        return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="np" ):
        '''Delegate to tokenizer.get_prompt_ids.'''
        return self.tokenizer.get_prompt_ids(__UpperCAmelCase , return_tensors=__UpperCAmelCase )
| 93 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A = logging.get_logger(__name__)

# NOTE(review): this archive map is bound to the same name `__A` as the logger
# above, so the logger binding is immediately overwritten — presumably these
# were `logger` and `DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm upstream.
__A = {
    """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
    ),
    """microsoft/deberta-v2-xxlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
    ),
}
class _lowerCAmelCase ( a ):
    """Configuration class for DeBERTa-v2 models.

    NOTE(review): reconstructed — the obfuscated original declared every
    `__init__` parameter as `__UpperCAmelCase` (duplicate argument names, a
    SyntaxError) and dropped the `self.` targets of all attribute assignments.
    Parameter names are grounded by the names the body reads (`hidden_size`,
    `pos_att_type`, `pooler_dropout`, ...); defaults copied verbatim. The class
    attribute was `__magic_name__` — restored to `model_type` per the
    PretrainedConfig convention; confirm against upstream.
    """

    model_type = "deberta-v2"

    def __init__(
        self ,
        vocab_size=1_2_8_1_0_0 ,
        hidden_size=1_5_3_6 ,
        num_hidden_layers=2_4 ,
        num_attention_heads=2_4 ,
        intermediate_size=6_1_4_4 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=5_1_2 ,
        type_vocab_size=0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-7 ,
        relative_attention=False ,
        max_relative_positions=-1 ,
        pad_token_id=0 ,
        position_biased_input=True ,
        pos_att_type=None ,
        pooler_dropout=0 ,
        pooler_hidden_act="gelu" ,
        **kwargs ,
    ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: accept a "|"-separated string for pos_att_type.
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|' )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Pooler hidden size falls back to hidden_size when not given in kwargs.
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size' , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class _lowerCAmelCase ( a ):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): the obfuscated original named every member `snake_case`, so
    later definitions shadowed earlier ones, and left the `dynamic_axis` local
    unbound. Member names restored from the OnnxConfig API (`inputs`,
    `default_onnx_opset`) and the in-body `super().generate_dummy_inputs(...)`
    call — confirm against upstream.
    """

    @property
    def inputs(self ):
        '''Mapping of model input names to their dynamic (symbolic) axes.'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        # token_type_ids are only an input when the model has token-type embeddings.
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )

    @property
    def default_onnx_opset(self ):
        '''Minimum ONNX opset supported by this export.'''
        return 1_2

    def generate_dummy_inputs(
        self ,
        preprocessor ,
        batch_size = -1 ,
        seq_length = -1 ,
        num_choices = -1 ,
        is_pair = False ,
        framework = None ,
        num_channels = 3 ,
        image_width = 4_0 ,
        image_height = 4_0 ,
        tokenizer = None ,
    ):
        '''Build dummy inputs, dropping token_type_ids when the model has none.'''
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 93 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A (_SCREAMING_SNAKE_CASE ) ->list[int]:
"""simple docstring"""
lowerCAmelCase__ :str = [True] * limit
lowerCAmelCase__ :Optional[int] = False
lowerCAmelCase__ :str = False
lowerCAmelCase__ :Dict = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCAmelCase__ :Union[str, Any] = i * 2
while index < limit:
lowerCAmelCase__ :Dict = False
lowerCAmelCase__ :Any = index + i
lowerCAmelCase__ :List[Any] = [2]
for i in range(3 , _SCREAMING_SNAKE_CASE , 2 ):
if is_prime[i]:
primes.append(_SCREAMING_SNAKE_CASE )
return primes
def __A (_SCREAMING_SNAKE_CASE = 100_0000 ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Tuple = prime_sieve(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = 0
lowerCAmelCase__ :Any = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
for j in range(i + length , len(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ :Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCAmelCase__ :List[str] = j - i
lowerCAmelCase__ :Tuple = sol
return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
| 93 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__A = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 | 1 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _lowerCAmelCase ( a ):
    """A distribution transformed by `y = loc + scale * x`.

    NOTE(review): reconstructed — the obfuscated original assigned the scale/loc
    values to a throwaway local instead of `self.scale`/`self.loc` (read in the
    `super().__init__` call below), and all three properties were named
    `snake_case`. Property names restored to the torch.distributions API
    (`mean`/`variance`/`stddev`; `stddev` reads `self.variance`) — confirm.
    """

    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0 ):
        '''Wrap `base_distribution` in an affine transform with the given loc/scale.'''
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )

    @property
    def mean( self ):
        '''Mean of the affinely transformed base distribution.'''
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance( self ):
        '''Variance scales by scale**2 under an affine transform.'''
        return self.base_dist.variance * self.scale**2

    @property
    def stddev( self ):
        '''Standard deviation, derived from the variance.'''
        return self.variance.sqrt()
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = args_dim
lowerCAmelCase__ :int = nn.ModuleList([nn.Linear(__UpperCAmelCase , __UpperCAmelCase ) for dim in args_dim.values()] )
lowerCAmelCase__ :Optional[int] = domain_map
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = [proj(__UpperCAmelCase ) for proj in self.proj]
return self.domain_map(*__UpperCAmelCase )
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :Optional[Any] = function
def snake_case ( self , __UpperCAmelCase , *__UpperCAmelCase ):
'''simple docstring'''
return self.function(__UpperCAmelCase , *__UpperCAmelCase )
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :type
__magic_name__ :int
__magic_name__ :Dict[str, int]
def __init__( self , __UpperCAmelCase = 1 ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = dim
lowerCAmelCase__ :Optional[int] = {k: dim * self.args_dim[k] for k in self.args_dim}
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*__UpperCAmelCase )
else:
return Independent(self.distribution_class(*__UpperCAmelCase ) , 1 )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self._base_distribution(__UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__UpperCAmelCase , loc=__UpperCAmelCase , scale=__UpperCAmelCase , event_dim=self.event_dim )
@property
def snake_case ( self ):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def snake_case ( self ):
'''simple docstring'''
return len(self.event_shape )
@property
def snake_case ( self ):
'''simple docstring'''
return 0.0
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
return ParameterProjection(
in_features=__UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def snake_case ( self , *__UpperCAmelCase ):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def snake_case ( __UpperCAmelCase ):
'''simple docstring'''
return (x + torch.sqrt(torch.square(__UpperCAmelCase ) + 4.0 )) / 2.0
class _lowerCAmelCase ( a ):
    """Student-T distribution output head.

    NOTE(review): both class attributes in the obfuscated original were named
    `__magic_name__` (the second overwrote the first); names restored to the
    `args_dim` / `distribution_class` contract of the base class, and the
    classmethod to `domain_map` — confirm upstream.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map( cls , df , loc , scale ):
        '''Constrain scale to be strictly positive and df to exceed 2.'''
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowerCAmelCase ( a ):
    """Normal distribution output head.

    NOTE(review): both class attributes in the obfuscated original were named
    `__magic_name__` (the second overwrote the first); names restored to the
    `args_dim` / `distribution_class` contract of the base class, and the
    classmethod to `domain_map` — confirm upstream.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map( cls , loc , scale ):
        '''Constrain scale to be strictly positive.'''
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class _lowerCAmelCase ( a ):
    """Negative-binomial distribution output head.

    NOTE(review): member names (`args_dim`, `distribution_class`, `domain_map`,
    `_base_distribution`, `distribution`) restored from the base-class contract
    and in-body references (`distr_args` was read though never bound); the
    obfuscated original collapsed them to `__magic_name__`/`snake_case` — confirm.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map( cls , total_count , logits ):
        '''total_count must be positive; logits are unconstrained.'''
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def _base_distribution( self , distr_args ):
        '''Instantiate the raw distribution; wrap in Independent when dim > 1.'''
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )

    def distribution( self , distr_args , loc = None , scale = None ):
        '''Fold the scale into the logits instead of affinely transforming counts.'''
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 93 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# NOTE(review): both model ids are bound to the same name `__A`, so the second
# assignment overwrites the first — presumably these were two distinct
# constants (a tiny BART id and a tiny T5 id) referenced by the tests below;
# confirm against the original make_student test module.
__A = """sshleifer/bart-tiny-random"""
__A = """patrickvonplaten/t5-tiny-random"""
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for create_student_by_copying_alternating_layers.

    NOTE(review): obfuscation artifacts — every test method is named
    `snake_case`, so only the LAST definition survives class creation, and the
    `__UpperCAmelCase` arguments are unbound names (presumably the tiny-BART /
    tiny-T5 model ids defined at module top). Recover the original method
    names before trusting this test class.
    """
    @cached_property
    def snake_case ( self ):
        # Presumably `teacher_config` — it is read below as self.teacher_config.
        '''Load and cache the teacher model's config.'''
        return AutoConfig.from_pretrained(__UpperCAmelCase )
    def snake_case ( self ):
        '''Student distilled to a single layer keeps num_hidden_layers == 1.'''
        lowerCAmelCase__ , *lowerCAmelCase__ :List[str] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def snake_case ( self ):
        '''Smoke test: creation succeeds when the decoder depth is left unset.'''
        lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )
    def snake_case ( self ):
        '''With only the encoder shrunk, the decoder keeps the teacher's depth.'''
        lowerCAmelCase__ , *lowerCAmelCase__ :List[Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def snake_case ( self ):
        '''Both encoder and decoder shrunk to one layer.'''
        lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def snake_case ( self ):
        '''Invalid layer counts must raise.'''
        with self.assertRaises(__UpperCAmelCase ):
            create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=__UpperCAmelCase , d=__UpperCAmelCase )
| 93 | 1 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def __A () ->None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 93 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# Bound as `is_torch_less_than_1_11`: onnx_export below branches on this exact
# name, which the obfuscated `__A` binding left undefined.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def __A (model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ) ->None:
    """Export `model` to ONNX at `output_path`, handling the torch<1.11 signature.

    NOTE(review): the obfuscated original declared all eight parameters with the
    same name (a SyntaxError). Names are grounded by the keyword call sites in
    convert_models (`model_args=`, `ordered_input_names=`, `opset=`, ...);
    the literal True values for `parents`/`exist_ok`/`do_constant_folding`/
    `enable_onnx_checker` follow the standard diffusers export script — confirm.
    """
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model ,
            model_args ,
            f=output_path.as_posix() ,
            input_names=ordered_input_names ,
            output_names=output_names ,
            dynamic_axes=dynamic_axes ,
            do_constant_folding=True ,
            use_external_data_format=use_external_data_format ,
            enable_onnx_checker=True ,
            opset_version=opset ,
        )
    else:
        export(
            model ,
            model_args ,
            f=output_path.as_posix() ,
            input_names=ordered_input_names ,
            output_names=output_names ,
            dynamic_axes=dynamic_axes ,
            do_constant_folding=True ,
            opset_version=opset ,
        )
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->List[Any]:
    """Export a Stable Diffusion pipeline (text encoder, UNet, VAE, safety checker) to ONNX.

    NOTE(review): machine-obfuscated — all four parameters share the name
    `_SCREAMING_SNAKE_CASE` (a SyntaxError; presumably model_path, output_path,
    opset, fp16), `torch.floataa` appears in both branches of the dtype ternary
    (presumably float16 vs float32), and every `lowerCAmelCase__` binding has
    lost its target, so names read later (`fpaa`, `pipeline`, `output_path`,
    `text_input`, `unet_path`, `vae_encoder`, `vae_decoder`, ...) are unbound.
    Left byte-identical pending recovery from the upstream diffusers script.
    """
    lowerCAmelCase__ :List[Any] = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        lowerCAmelCase__ :Tuple = 'cuda'
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        lowerCAmelCase__ :List[Any] = 'cpu'
    lowerCAmelCase__ :List[str] = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , torch_dtype=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :Dict = Path(_SCREAMING_SNAKE_CASE )
    # TEXT ENCODER
    lowerCAmelCase__ :str = pipeline.text_encoder.config.max_position_embeddings
    lowerCAmelCase__ :Dict = pipeline.text_encoder.config.hidden_size
    lowerCAmelCase__ :List[Any] = pipeline.tokenizer(
        'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    del pipeline.text_encoder
    # UNET
    lowerCAmelCase__ :int = pipeline.unet.config.in_channels
    lowerCAmelCase__ :Optional[Any] = pipeline.unet.config.sample_size
    lowerCAmelCase__ :Dict = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            torch.randn(2 ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            torch.randn(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=_SCREAMING_SNAKE_CASE , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        } , opset=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , )
    lowerCAmelCase__ :List[Any] = str(unet_path.absolute().as_posix() )
    lowerCAmelCase__ :int = os.path.dirname(_SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :str = onnx.load(_SCREAMING_SNAKE_CASE )
    # clean up existing tensor files
    shutil.rmtree(_SCREAMING_SNAKE_CASE )
    os.mkdir(_SCREAMING_SNAKE_CASE )
    # collate external tensor files into one
    onnx.save_model(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_as_external_data=_SCREAMING_SNAKE_CASE , all_tensors_to_one_file=_SCREAMING_SNAKE_CASE , location='weights.pb' , convert_attribute=_SCREAMING_SNAKE_CASE , )
    del pipeline.unet
    # VAE ENCODER
    lowerCAmelCase__ :int = pipeline.vae
    lowerCAmelCase__ :Optional[Any] = vae_encoder.config.in_channels
    lowerCAmelCase__ :int = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    lowerCAmelCase__ :str = lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : vae_encoder.encode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0].sample()
    onnx_export(
        _SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    # VAE DECODER
    lowerCAmelCase__ :int = pipeline.vae
    lowerCAmelCase__ :List[Any] = vae_decoder.config.latent_channels
    lowerCAmelCase__ :Optional[int] = vae_decoder.config.out_channels
    # forward only through the decoder part
    lowerCAmelCase__ :Any = vae_encoder.decode
    onnx_export(
        _SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        lowerCAmelCase__ :Optional[int] = pipeline.safety_checker
        lowerCAmelCase__ :Optional[int] = safety_checker.config.vision_config.num_channels
        lowerCAmelCase__ :Any = safety_checker.config.vision_config.image_size
        lowerCAmelCase__ :List[str] = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
                torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            ) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            } , opset=_SCREAMING_SNAKE_CASE , )
        del pipeline.safety_checker
        lowerCAmelCase__ :Union[str, Any] = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
        lowerCAmelCase__ :Dict = pipeline.feature_extractor
    else:
        lowerCAmelCase__ :Tuple = None
        lowerCAmelCase__ :Optional[int] = None
    lowerCAmelCase__ :List[str] = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(_SCREAMING_SNAKE_CASE )
    print('ONNX pipeline saved to' , _SCREAMING_SNAKE_CASE )
    del pipeline
    del onnx_pipeline
    lowerCAmelCase__ :Dict = OnnxStableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , provider='CPUExecutionProvider' )
    print('ONNX pipeline is loadable' )
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound both the parser and the parsed
    # args to `__A`, leaving `parser`/`args` undefined, and read `args.fpaa`
    # although the flag is registered as `--fp16`; both restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 93 | 1 |
def __lowercase ( snake_case, snake_case, snake_case, snake_case, snake_case, snake_case ):
"""simple docstring"""
if index == r:
for j in range(snake_case ):
print(data[j], end=''' ''' )
print(''' ''' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
__magic_name__ :Dict = arr[i]
combination_util(snake_case, snake_case, snake_case, index + 1, snake_case, i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(snake_case, snake_case, snake_case, snake_case, snake_case, i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowercase ( snake_case, snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :List[str] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(snake_case, snake_case, snake_case, 0, snake_case, 0 )
if __name__ == "__main__":
    # Driver code to check the function above.  NOTE(review): the original
    # referenced the undefined names `print_combination` and `arr`; the
    # three-argument printer defined above is `__lowercase`.
    arr = [10, 20, 30, 40, 50]
    __lowercase(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 | 0 |
import math
import sys
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = ''
try:
with open(_lowercase , 'rb' ) as binary_file:
__UpperCamelCase = binary_file.read()
for dat in data:
__UpperCamelCase = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = {'0': '0', '1': '1'}
__UpperCamelCase, __UpperCamelCase = '', ''
__UpperCamelCase = len(_lowercase )
for i in range(len(_lowercase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__UpperCamelCase = lexicon[curr_string]
result += last_match_id
__UpperCamelCase = last_match_id + '0'
if math.loga(_lowercase ).is_integer():
__UpperCamelCase = {}
for curr_key in list(_lowercase ):
__UpperCamelCase = lexicon.pop(_lowercase )
__UpperCamelCase = new_lex
__UpperCamelCase = last_match_id + '1'
index += 1
__UpperCamelCase = ''
return result
def _A ( _lowercase , _lowercase ) -> None:
"""simple docstring"""
__UpperCamelCase = 8
try:
with open(_lowercase , 'wb' ) as opened_file:
__UpperCamelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(_lowercase ) , _lowercase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_lowercase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _A ( _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__UpperCamelCase = data_bits[counter:]
__UpperCamelCase = data_bits[counter + 1 :]
return data_bits
def _A(source_path: str, destination_path: str) -> None:
    """Decompress the file at *source_path* into *destination_path*.

    Pipeline: read bits -> strip padding prefix -> LZW-decompress -> write.
    NOTE(review): the helpers `read_file_binary`, `remove_prefix`,
    `decompress_data` and `write_file_binary` are all bound to `_A` in this
    module (each def shadows the previous) — the call targets below keep the
    originally intended names; confirm before running.  The original declared
    two parameters both named `_lowercase` (a SyntaxError).
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
    # NOTE(review): the original called the undefined name `compress`; the
    # decompression entry point defined last in this module is `_A`.
    _A(sys.argv[1], sys.argv[2])
| 1 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[str]:
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(_SCREAMING_SNAKE_CASE ) - ngram_size + 1 )]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 93 | 0 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE_(_snake_case: int) -> bool:
    """Return True iff *_snake_case* is prime, by 6k±1 trial division."""
    if 1 < _snake_case < 4:
        # 2 and 3 are prime
        return True
    if _snake_case < 2 or _snake_case % 2 == 0 or _snake_case % 3 == 0:
        # negatives, 0, 1, even numbers and multiples of 3 are not prime
        return False
    # every remaining prime divisor has the form 6k ± 1
    upper = int(math.sqrt(_snake_case) + 1)
    return all(
        _snake_case % div != 0 and _snake_case % (div + 2) != 0
        for div in range(5, upper, 6)
    )
UpperCAmelCase_ = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def SCREAMING_SNAKE_CASE_(_snake_case: int) -> list[int]:
    """Return the first *_snake_case* odd composites that cannot be written as
    prime + 2*i**2 (Goldbach's "other" conjecture search).

    Reads the module-level candidate list as ``odd_composites`` and the
    primality test as ``is_prime``.  NOTE(review): in this module those helpers
    are bound to ``UpperCAmelCase_`` / ``SCREAMING_SNAKE_CASE_`` — confirm the
    intended names.  The original also tested ``isinstance(n, n)`` and read the
    unbound locals ``list_nums``/``i``/``rem``.

    Raises:
        ValueError: if *_snake_case* is not a positive integer.
    """
    if not isinstance(_snake_case, int):
        raise ValueError('n must be an integer')
    if _snake_case <= 0:
        raise ValueError('n must be >= 0')
    list_nums = []
    for candidate in odd_composites:
        i = 0
        while 2 * i * i <= candidate:
            # candidate = prime + 2*i*i ⇒ the conjecture holds for it
            if is_prime(candidate - 2 * i * i):
                break
            i += 1
        else:
            # no decomposition found: candidate is a counterexample
            list_nums.append(candidate)
        if len(list_nums) == _snake_case:
            return list_nums
    return []
def SCREAMING_SNAKE_CASE_ ( ) -> int:
    """Project Euler entry point: return the first odd composite that has no
    prime + 2*square decomposition.

    NOTE(review): calls ``compute_nums``, a name never bound in this module
    (the helper above is also named SCREAMING_SNAKE_CASE_) — confirm before
    running.
    """
    return compute_nums(1 )[0]
if __name__ == "__main__":
    # NOTE(review): the original printed `solution()`, a name never bound in
    # this module; the zero-argument entry point defined last above is
    # SCREAMING_SNAKE_CASE_.
    print(f'{SCREAMING_SNAKE_CASE_() = }')
| 2 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``InstructBlipProcessor``: fixture save/load round-trips and
    tokenizer / image-processor / Q-Former tokenizer integration.

    NOTE(review): local bindings in this class look mechanically renamed —
    every assignment targets ``lowerCAmelCase__`` while later lines read names
    such as ``processor`` or ``tokenizer``, and ``self.tmpdirname`` is read but
    never assigned.  Reconcile with the upstream source before running.
    """
    def snake_case ( self ):
        '''setUp: build an InstructBlipProcessor fixture and save it to a temp dir.'''
        lowerCAmelCase__ :Any = tempfile.mkdtemp()
        lowerCAmelCase__ :List[Any] = BlipImageProcessor()
        lowerCAmelCase__ :Union[str, Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        lowerCAmelCase__ :Union[str, Any] = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
        lowerCAmelCase__ :List[str] = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )
    def snake_case ( self , **__UpperCAmelCase ):
        '''Reload the saved processor and return its tokenizer.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
    def snake_case ( self , **__UpperCAmelCase ):
        '''Reload the saved processor and return its image processor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
    def snake_case ( self , **__UpperCAmelCase ):
        '''Reload the saved processor and return its Q-Former tokenizer.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
    def snake_case ( self ):
        '''tearDown: remove the temporary fixture directory.'''
        shutil.rmtree(self.tmpdirname )
    def snake_case ( self ):
        '''Return a list containing one random PIL image (moved to HWC layout).'''
        lowerCAmelCase__ :Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        lowerCAmelCase__ :Dict = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case ( self ):
        '''Save, then reload with extra kwargs: all components must round-trip.'''
        lowerCAmelCase__ :List[str] = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        lowerCAmelCase__ :str = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
        lowerCAmelCase__ :Dict = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
        self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
    def snake_case ( self ):
        '''Image path through the processor must match the bare image processor.'''
        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :List[Any] = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
        lowerCAmelCase__ :str = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :Dict = self.prepare_image_inputs()
        lowerCAmelCase__ :List[str] = image_processor(__UpperCAmelCase , return_tensors='np' )
        lowerCAmelCase__ :Optional[Any] = processor(images=__UpperCAmelCase , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def snake_case ( self ):
        '''Text path must match both the main tokenizer and the Q-Former tokenizer.'''
        lowerCAmelCase__ :Union[str, Any] = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
        lowerCAmelCase__ :Any = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = 'lower newer'
        lowerCAmelCase__ :Dict = processor(text=__UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
    def snake_case ( self ):
        '''Joint text+image call must emit the expected keys; empty call raises.'''
        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :str = self.get_qformer_tokenizer()
        lowerCAmelCase__ :Dict = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = 'lower newer'
        lowerCAmelCase__ :Dict = self.prepare_image_inputs()
        lowerCAmelCase__ :Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()
    def snake_case ( self ):
        '''batch_decode must defer to the wrapped tokenizer's batch_decode.'''
        lowerCAmelCase__ :Tuple = self.get_image_processor()
        lowerCAmelCase__ :Dict = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
        lowerCAmelCase__ :int = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCAmelCase__ :Tuple = processor.batch_decode(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = tokenizer.batch_decode(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def snake_case ( self ):
        '''Model input names: the joint call's keys, in the expected order.'''
        lowerCAmelCase__ :Tuple = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :Dict = self.get_qformer_tokenizer()
        lowerCAmelCase__ :Optional[Any] = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = 'lower newer'
        lowerCAmelCase__ :Optional[int] = self.prepare_image_inputs()
        lowerCAmelCase__ :int = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 93 | 0 |
'''simple docstring'''
def A_(A: list[int]):
    """Return all permutations of the list *A*.

    Rotate-and-recurse approach: repeatedly pop the head, permute the
    remainder, append the popped element to each sub-permutation.  *A* is
    restored to its original contents before returning.  The original bound
    every local to `UpperCamelCase` and then appended the whole input list
    (`perm.append(A)`) instead of the popped element.
    """
    result = []
    if len(A) == 1:
        return [A.copy()]
    for _ in range(len(A)):
        head = A.pop(0)
        sub_perms = A_(A)
        for perm in sub_perms:
            perm.append(head)
        result.extend(sub_perms)
        A.append(head)
    return result
def A_(A):
    """Return all permutations of the sequence *A* via in-place swap
    backtracking.

    NOTE(review): the upstream annotation said ``str`` but the algorithm
    mutates its argument, so a list is expected — confirm.  The original's
    nested helper shadowed its own argument and read the unbound names
    ``start``/``nums``/``output``.
    """

    def backtrack(start):
        if start == len(A) - 1:
            output.append(A[:])
        else:
            for i in range(start, len(A)):
                A[i], A[start] = A[start], A[i]
                backtrack(start + 1)
                A[i], A[start] = A[start], A[i]  # backtrack (undo the swap)

    output = []
    backtrack(0)
    return output
if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    # NOTE(review): the original called the undefined `permutea` and printed
    # the undefined `res`; A_ (the backtracking version defined last) is the
    # available entry point.
    res = A_([1, 2, 3])
    print(res)
    doctest.testmod()
| 3 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__A = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
F" reinstalling {pkg}." )
if not ops[op](version.parse(_SCREAMING_SNAKE_CASE ) , version.parse(_SCREAMING_SNAKE_CASE ) ):
raise ImportError(
F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) ->None:
"""simple docstring"""
lowerCAmelCase__ :List[str] = F"\n{hint}" if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$' , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = requirement, None, None
else:
lowerCAmelCase__ :List[str] = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , _SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
F" got {requirement}" )
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = match[0]
lowerCAmelCase__ :List[Any] = want_full.split(',' ) # there could be multiple requirements
lowerCAmelCase__ :Any = {}
for w in want_range:
lowerCAmelCase__ :Tuple = re.findall(r'^([\s!=<>]{1,2})(.+)' , _SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
F" but got {requirement}" )
lowerCAmelCase__ , lowerCAmelCase__ :int = match[0]
lowerCAmelCase__ :str = want_ver
if op not in ops:
raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
# special case
if pkg == "python":
lowerCAmelCase__ :Any = '.'.join([str(_SCREAMING_SNAKE_CASE ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return
# check if any version is installed
try:
lowerCAmelCase__ :List[Any] = importlib.metadata.version(_SCREAMING_SNAKE_CASE )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"The '{requirement}' distribution was not found and is required by this application. {hint}" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A(requirement):
    """Check *requirement* with a core-install hint attached.

    NOTE(review): delegates to `require_version`, which in this module is
    bound to `__A` (each def shadows the previous) — confirm the intended
    name.  The original passed the requirement twice instead of supplying the
    hint as the second argument.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 93 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] ):
lowerCAmelCase = F'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(_UpperCAmelCase , 'r' ) as f:
lowerCAmelCase = f.readlines()
lowerCAmelCase = F'class {class_name}('
lowerCAmelCase = F'{4 * " "}def {test_name}('
lowerCAmelCase = F'{8 * " "}{correct_line.split()[0]}'
lowerCAmelCase = F'{16 * " "}{correct_line.split()[0]}'
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = []
for line in lines:
if line.startswith(_UpperCAmelCase ):
lowerCAmelCase = True
elif in_class and line.startswith(_UpperCAmelCase ):
lowerCAmelCase = True
elif in_class and in_func and (line.startswith(_UpperCAmelCase ) or line.startswith(_UpperCAmelCase )):
lowerCAmelCase = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowerCAmelCase = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowerCAmelCase = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'{spaces * " "}{correct_line}' )
lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = lowerCAmelCase = False
else:
new_lines.append(_UpperCAmelCase )
with open(_UpperCAmelCase , 'w' ) as f:
for line in new_lines:
f.write(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE (correct_filename, fail_filename=None):
    """Apply every fix listed in *correct_filename* (semicolon-separated
    ``file;class;test;correct_line`` records, one per line).

    If *fail_filename* is given, only entries whose ``file::class::test`` id
    appears in that file are applied.  NOTE(review): delegates to
    `overwrite_file`, which in this module is bound to `_SCREAMING_SNAKE_CASE`
    (shadowed by this def) — confirm the intended name.  The original declared
    both parameters as `_UpperCAmelCase` (a SyntaxError).
    """
    if fail_filename is not None:
        with open(fail_filename, 'r') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, 'r') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
    # NOTE(review): the original bound the parser/namespace to `__UpperCamelCase`
    # while reading the undefined `parser`/`args`, and called the undefined
    # `main`; the entry point defined above is _SCREAMING_SNAKE_CASE.
    _SCREAMING_SNAKE_CASE(args.correct_filename, args.fail_filename)
| 4 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( a , unittest.TestCase ):
    """Fast scheduler-matrix tests for ``OnnxStableDiffusionImg2ImgPipeline``
    against a tiny hub checkpoint on the CPU execution provider.

    NOTE(review): local bindings look mechanically renamed — every assignment
    targets ``lowerCAmelCase__`` while later lines read ``pipe``, ``image``,
    ``image_slice`` etc.  Reconcile with the upstream source before running.
    """
    __magic_name__ :int = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
    def snake_case ( self , __UpperCAmelCase=0 ):
        '''Build deterministic dummy inputs (image tensor, RNG, call kwargs).'''
        lowerCAmelCase__ :List[str] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__UpperCAmelCase ) )
        lowerCAmelCase__ :List[str] = np.random.RandomState(__UpperCAmelCase )
        lowerCAmelCase__ :List[str] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def snake_case ( self ):
        '''Default scheduler: check output shape and a reference pixel slice.'''
        lowerCAmelCase__ :Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = self.get_dummy_inputs()
        lowerCAmelCase__ :Optional[int] = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :Union[str, Any] = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        '''PNDM scheduler (skip_prk_steps): shape and reference slice.'''
        lowerCAmelCase__ :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = self.get_dummy_inputs()
        lowerCAmelCase__ :Optional[Any] = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :int = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        '''LMS discrete scheduler (with warmup pass): shape and slice.'''
        lowerCAmelCase__ :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        # warmup pass to apply optimizations
        lowerCAmelCase__ :List[Any] = pipe(**self.get_dummy_inputs() )
        lowerCAmelCase__ :Tuple = self.get_dummy_inputs()
        lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :Union[str, Any] = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        '''Euler discrete scheduler: shape and reference slice.'''
        lowerCAmelCase__ :Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Any = self.get_dummy_inputs()
        lowerCAmelCase__ :List[str] = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :Optional[int] = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        '''Euler-ancestral discrete scheduler: shape and reference slice.'''
        lowerCAmelCase__ :str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = self.get_dummy_inputs()
        lowerCAmelCase__ :Any = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :int = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        '''DPM-Solver multistep scheduler: shape and reference slice.'''
        lowerCAmelCase__ :List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Any = self.get_dummy_inputs()
        lowerCAmelCase__ :List[Any] = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :Optional[Any] = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Nightly full-size img2img inference tests on the CUDA execution
    provider against real Stable Diffusion ONNX checkpoints.

    NOTE(review): local bindings look mechanically renamed — assignments
    target ``lowerCAmelCase__`` while later lines read ``pipe``/``images``
    etc., and the session-options setter below assigns a bare local instead of
    an attribute of ``options``.  Reconcile with the upstream source.
    """
    @property
    def snake_case ( self ):
        '''ONNX Runtime provider tuple: CUDA with a bounded memory arena.'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def snake_case ( self ):
        '''Session options for ONNX Runtime (presumably disabling mem pattern —
        the flag assignment below targets a local, not the options object).'''
        lowerCAmelCase__ :Tuple = ort.SessionOptions()
        lowerCAmelCase__ :Optional[int] = False
        return options
    def snake_case ( self ):
        '''Default (PNDM) scheduler inference on CompVis SD v1-4 ONNX weights.'''
        lowerCAmelCase__ :Any = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        lowerCAmelCase__ :Any = init_image.resize((7_6_8, 5_1_2) )
        # using the PNDM scheduler by default
        lowerCAmelCase__ :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = 'A fantasy landscape, trending on artstation'
        lowerCAmelCase__ :Optional[Any] = np.random.RandomState(0 )
        lowerCAmelCase__ :List[str] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__UpperCAmelCase , output_type='np' , )
        lowerCAmelCase__ :Any = output.images
        lowerCAmelCase__ :List[str] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        lowerCAmelCase__ :List[Any] = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def snake_case ( self ):
        '''LMS discrete scheduler inference on runwayml SD v1-5 ONNX weights.'''
        lowerCAmelCase__ :Union[str, Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        lowerCAmelCase__ :Optional[Any] = init_image.resize((7_6_8, 5_1_2) )
        lowerCAmelCase__ :List[Any] = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        lowerCAmelCase__ :Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = 'A fantasy landscape, trending on artstation'
        lowerCAmelCase__ :List[Any] = np.random.RandomState(0 )
        lowerCAmelCase__ :List[Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__UpperCAmelCase , output_type='np' , )
        lowerCAmelCase__ :Optional[Any] = output.images
        lowerCAmelCase__ :int = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        lowerCAmelCase__ :List[Any] = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 93 | 0 |
'''simple docstring'''
def A(arr, n, r, index, data, i):
    """Recursively print every combination of size r drawn from arr[0:n].

    arr: candidate pool; n: count of usable elements; r: combination size;
    data: scratch list of length r holding the current partial combination;
    index: next free slot in data; i: next candidate position in arr.
    (The original declared six parameters all named ``__lowerCamelCase`` — a
    SyntaxError — and recursed through the undefined ``combination_util``.)
    """
    if index == r:
        # data[] holds a complete combination — print it
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    A(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with next
    # (i+1 is passed, but index is not changed)
    A(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def A(arr, n, r):
    """Print every size-r combination of arr[0:n], one combination per line.

    Self-contained: uses a nested helper instead of the undefined module-level
    name ``combination_util`` the original tried to call (the original also
    declared three parameters all named ``__lowerCamelCase`` — a SyntaxError).
    """
    # A temporary array to store all combination one by one
    data = [0] * r

    def _util(index, i):
        # index: next free slot in data; i: next candidate position in arr
        if index == r:
            for j in range(r):
                print(data[j], end=" ")
            print(" ")
            return
        if i >= n:
            return
        data[index] = arr[i]
        _util(index + 1, i + 1)  # include arr[i]
        _util(index, i + 1)  # exclude arr[i]

    # Print all combination using temporary array 'data[]'
    _util(0, 0)
if __name__ == "__main__":
    # Driver code to check the function above.  NOTE(review): the original
    # bound the list to `_lowercase` but then called the undefined
    # `print_combination(arr, ...)`; the printer defined above is `A`.
    _lowercase = [10, 20, 30, 40, 50]
    A(_lowercase, len(_lowercase), 3)
    # This code is contributed by Ambuj sahu
| 5 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__A = ["""gpt2"""]
__A = """gpt2"""
if is_tf_available():
    class _lowerCAmelCase ( tf.Module ):
        """tf.Module pairing a tokenizer with a tiny GPT-2 LM head so the two
        can be exported together through a tf.function serving signature.

        NOTE(review): ``__init__`` assigns to the throwaway local
        ``lowerCAmelCase__`` (and reads the unbound name ``tokenizer``) instead
        of setting the ``self.tokenizer``/``self.model`` attributes that
        ``serving`` reads — looks mechanically renamed; reconcile with the
        upstream source before running.
        """
        def __init__( self , __UpperCAmelCase ):
            '''Store the tokenizer and build a fresh LM head from its config.'''
            super().__init__()
            lowerCAmelCase__ :List[str] = tokenizer
            lowerCAmelCase__ :Optional[int] = AutoConfig.from_pretrained(__UpperCAmelCase )
            lowerCAmelCase__ :Optional[Any] = TFGPTaLMHeadModel.from_config(__UpperCAmelCase )
        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
        def snake_case ( self , __UpperCAmelCase ):
            '''Tokenize a batch of strings, build an attention mask from the
            non-zero ids, and return the LM logits.'''
            lowerCAmelCase__ :str = self.tokenizer(__UpperCAmelCase )
            lowerCAmelCase__ :int = tokenized['input_ids'].to_tensor()
            lowerCAmelCase__ :Optional[Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            lowerCAmelCase__ :int = self.model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )['logits']
            return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase(unittest.TestCase):
    """Checks that the in-graph TFGPTaTokenizer agrees with the reference Python
    tokenizer and survives tf.function compilation, SavedModel round-trips,
    config round-trips and max_length truncation.

    BUG FIX: the original never bound the ``self.*`` fixtures (assignments went
    to throwaway locals) and gave every method the same name ``snake_case`` —
    only the last definition survived and none matched unittest's ``test_*``
    discovery convention.
    """

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_tokenization(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf')
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['input_ids'].numpy().shape[1]
                assert out_length == max_length
| 93 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_(PretrainedConfig):
    """Composite configuration holding one encoder config and one decoder config
    for an encoder-decoder model.

    BUG FIX: class attributes were both bound to the same name (PretrainedConfig
    requires ``model_type``/``is_composition``); ``__init__`` and the classmethod
    declared duplicate parameter names (a SyntaxError) and discarded every value.
    The base class is the imported ``PretrainedConfig``.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("""encoder""")
        encoder_model_type = encoder_config.pop("""model_type""")
        decoder_config = kwargs.pop("""decoder""")
        decoder_model_type = decoder_config.pop("""model_type""")

        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Build a composite config from two sub-configs, forcing the decoder
        flags the decoder side needs for cross-attention."""
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an UperNetConfig (ConvNeXt backbone, ADE20k labels) for the given
    checkpoint name.

    BUG FIX: the original was defined as ``__A`` (while the call site uses
    ``get_upernet_config``), discarded every computed value into throwaway
    locals, and passed the raw ``model_name`` string as every keyword argument.
    """
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4']
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Return (old_key, new_key) pairs mapping mmsegmentation state-dict names
    onto the Hugging Face UperNet/ConvNeXt naming scheme.

    BUG FIX: the original was defined as ``__A`` (called as ``create_rename_keys``),
    never bound ``rename_keys`` and read an undefined ``config``.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight'))
    rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias'))
    rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight'))
    rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias'))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        # stage 0's downsampling layer is the stem handled above
        if i > 0:
            rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
            ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
            ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
            ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
        ])
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    BUG FIX: the original declared three parameters with the same name, which
    is a SyntaxError in Python, and was defined as ``__A`` while being called
    as ``rename_key``.
    """
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Download an mmsegmentation UperNet-ConvNeXt checkpoint, convert it to the
    Hugging Face format, verify one forward pass against known logits, and
    optionally save and/or push the result.

    BUG FIX: the original declared three identically-named parameters (a
    SyntaxError) and discarded every intermediate value into throwaway locals.
    Parameter names match the argparse call site at the bottom of the file.
    """
    model_name_to_url = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    # Reference logit slices recorded from the original mmsegmentation models.
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    # BUG FIX: the parser and parsed args were bound to throwaway names, so the
    # `parser.add_argument`/`args.*` references below were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""upernet-convnext-tiny""",
        type=str,
        choices=[F'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
        help="""Name of the ConvNext UperNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 93 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def _snake_case(number: int) -> int:
    """Liouville's lambda function: -1 if *number* has an odd count of prime
    factors (with multiplicity), else 1.

    Raises:
        TypeError: if *number* is not an int.
        ValueError: if *number* < 1.

    BUG FIX: the parameter shadowed the function's own name while the body
    (including the f-string) read ``number``, and TypeError was raised with the
    offending *input* instead of the error message.
    """
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        raise ValueError('Input must be a positive integer')
    # prime_factors returns the factorization with multiplicity (project helper).
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 7 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__A = logging.get_logger(__name__)
# BUG FIX: all three constants were bound to the same throwaway name `__A`,
# leaving the names referenced by the tokenizer class below undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Per-checkpoint download locations for each vocabulary file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class _lowerCAmelCase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) RoBERTa tokenizer: byte-level BPE with
    configurable prefix-space and offset-trimming behaviour.

    BUG FIX: the original subclassed an undefined name ``a``; declared every
    ``__init__`` parameter with the same name (a SyntaxError); bound all class
    attributes to one name; and used ``@mask_token.setter`` on a property that
    was not named ``mask_token`` (NameError at class creation). Attribute and
    method names now follow the PreTrainedTokenizerFast contract the base class
    and callers rely on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space flag differs
        # from what the caller asked for.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False

            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True

            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        """The mask token as a string, or None (with a logged error) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Wrap plain strings so the mask token eats the preceding space.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the BPE model files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """RoBERTa does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU activation element-wise and return a numpy array.

    BUG FIX: the function was named ``_lowerCAmelCase`` while the demo below
    calls ``relu``, and the return annotation claimed ``Optional[int]`` even
    though ``np.maximum`` returns an ndarray.
    """
    # np.maximum broadcasts the scalar 0 against every element: max(0, x).
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k±1 optimisation.

    BUG FIX: defined as ``__A`` while every call site in this file uses
    ``is_prime``.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
__A = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first *n* odd composites that cannot be written as
    prime + 2*i*i (counterexamples to Goldbach's other conjecture).

    BUG FIX: defined as ``__A`` while called as ``compute_nums``; the loop
    state (``list_nums``, ``i``, the remainder) was bound to throwaway names
    and the primality test was applied to the wrong value.
    """
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    if n <= 0:
        raise ValueError('n must be >= 0')

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                # Representable as prime + 2*i*i — not a counterexample.
                break
            i += 1
        else:
            # No representation found: record this composite.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []
def solution() -> int:
    """Project Euler 46: the smallest odd composite that is not the sum of a
    prime and twice a square.

    BUG FIX: defined as ``__A`` while the main guard calls ``solution``, and
    it calls the sibling helper by its real name ``compute_nums``.
    """
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 93 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    """Builds tiny ViT-hybrid configs and inputs for the common model tests.

    BUG FIX: the original class was named ``__lowerCAmelCase`` while setUp()
    below instantiates ``ViTHybridModelTester``; every constructor argument was
    discarded into the throwaway ``A__`` instead of being stored on ``self``;
    and all methods shared the name ``_a``, so only the last survived.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViT-hybrid.

    BUG FIX: the original subclassed two undefined names; bound every class
    attribute to the same throwaway name (losing ``all_model_classes`` etc.);
    and named every method ``_a`` instead of the unittest ``test_*`` names.
    """

    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(),
                        [0.0, 1.0],
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture used by the integration tests.

    BUG FIX: defined as ``A`` while called as ``prepare_img``, and the loaded
    image was bound to a throwaway name instead of the returned variable.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released google/vit-hybrid checkpoint.

    BUG FIX: the original reused the class name of the tester above (clobbering
    it), discarded every intermediate value into ``A__``, and named both test
    methods ``_a``.
    """

    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')
        image = prepare_img()

        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        # NOTE(review): assertTrue with a second argument is almost certainly
        # meant to be assertEqual (matches the upstream test file) — confirm.
        self.assertTrue(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
| 9 |
"""simple docstring"""
import re
def __A (_SCREAMING_SNAKE_CASE ) ->list:
"""simple docstring"""
return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )]
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = split_input(str_ )
return "".join(
[''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
try:
lowerCAmelCase__ :Any = split_input(_SCREAMING_SNAKE_CASE )
if upper:
lowerCAmelCase__ :str = ''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
lowerCAmelCase__ :int = ''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
return to_simple_case(_SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
try:
lowerCAmelCase__ :str = to_simple_case(_SCREAMING_SNAKE_CASE )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
return to_complex_case(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '_' )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
return to_complex_case(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , '-' )
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this file.
_lowerCAmelCase = logging.get_logger(__name__)

# NOTE(review): this second assignment rebinds the same name and clobbers the logger
# above — originally these were two distinct module constants (a logger and the
# pretrained-config archive map). They need distinct names to be usable.
_lowerCAmelCase = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCAmelCase_ ( __lowercase ):
    """Configuration class for SegFormer (per-stage depths, widths, attention sizes and
    the segmentation decode head).

    Fixed from the mangled original: every ``__init__`` parameter shared the name
    ``_A`` (a SyntaxError) and every assignment was bound to ``_UpperCamelCase``
    instead of a ``self`` attribute. Parameter names are restored from the
    right-hand sides of the original assignments.
    """

    UpperCAmelCase = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                '''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
                ''' removed, as the behaviour will default to that of reshape_last_stage = True.''',
                FutureWarning,  # Fixed: the warning category was the undefined name `_A`.
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # NOTE(review): the original default for the deprecated flag was lost in the
        # mangling; True matches the deprecation message above — confirm upstream.
        self.reshape_last_stage = kwargs.get('''reshape_last_stage''', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class lowerCAmelCase_ ( __lowercase ):
    """ONNX export configuration for SegFormer."""

    UpperCAmelCase = version.parse("1.11" )

    @property
    def UpperCamelCase_ ( self : Any ):
        """Dynamic input axes: one 4-D ``pixel_values`` tensor (batch, channels, h, w)."""
        pixel_values_axes = {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}
        return OrderedDict([('''pixel_values''', pixel_values_axes)] )

    @property
    def UpperCamelCase_ ( self : str ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4

    @property
    def UpperCamelCase_ ( self : int ):
        """Default ONNX opset version for export."""
        return 12
| 10 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
# Fixed: all four credential values were bound to the same name `__A`, leaving three
# of them unreachable; restore the four distinct names an OAuth 1.0a flow needs.
# NOTE(review): names chosen from the tweepy OAuthHandler/set_access_token call
# pattern — confirm against the original script. Fill these in before running.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def __A (screen_name ) ->None:
    """Download a user's recent tweets (Twitter caps history at ~3200) and write them
    to ``new_<screen_name>_tweets.csv`` with columns id / created_at / text.

    Fixed from the mangled original: every local was bound to a throwaway name while
    later lines read ``alltweets``/``oldest``/``screen_name`` etc.; names restored
    from those reads. The parameter is renamed to ``screen_name`` because the
    f-strings below read it (callers pass it positionally).
    """
    # NOTE(review): the four credential constants are module-level Twitter API
    # credentials and must be filled in before this will authenticate.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(F"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(F"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(F"new_{screen_name}_tweets.csv", 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'created_at', 'text'])
        writer.writerows(outtweets)
if __name__ == "__main__":
    # pass in the username of the account you want to download
    # NOTE(review): `get_all_tweets` is not defined under that name in this module
    # (the downloader above is named `__A`), so this line raises NameError as
    # written — confirm the intended entry-point name.
    get_all_tweets("""FirePing32""")
| 93 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCAmelCase (name):
    """Map one GroupViT checkpoint parameter name onto the HF Transformers naming scheme.

    Fixed: the parameter was an opaque placeholder while the body read ``name``, and
    every replacement was bound to ``_a`` instead of back onto ``name`` — so no
    rewrite ever took effect. Substitutions and their order are unchanged.
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def lowerCAmelCase (__A , __A):
    """Rewrite a GroupViT checkpoint state dict into HF naming, splitting the fused
    qkv (vision) and in_proj (text) attention tensors into separate q/k/v matrices.

    NOTE(review): this block was mangled by an automated rename and is NOT runnable
    as written:
    - both parameters share the name ``__A`` (a SyntaxError); the body reads
      ``orig_state_dict`` and ``config``, which is what they were originally,
    - every split slice is bound to the throwaway name ``_a`` instead of being
      written back into ``orig_state_dict`` under its new q/k/v key, so the intended
      target key names are lost and cannot be reconstructed from this file alone.
    Restore from the upstream GroupViT conversion script before use.
    """
    for key in orig_state_dict.copy().keys():
        # Pop each entry; split entries are re-inserted under new names below.
        _a = orig_state_dict.pop(__A)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            _a = key.split('''.''')
            # NOTE(review): `key_split`/`dim`/`val` are the names the reads below expect.
            _a , _a = int(key_split[2]), int(key_split[4])
            _a = config.vision_config.hidden_size
            if "weight" in key:
                # q / k / v weight rows, in that order.
                _a = val[:dim, :]
                _a = val[dim : dim * 2, :]
                _a = val[-dim:, :]
            else:
                # q / k / v bias segments, in that order.
                _a = val[:dim]
                _a = val[dim : dim * 2]
                _a = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            _a = key.split('''.''')
            _a = int(key_split[3])
            _a = config.text_config.hidden_size
            if "weight" in key:
                _a = val[:dim, :]
                _a = val[
                    dim : dim * 2, :
                ]
                _a = val[-dim:, :]
            else:
                _a = val[:dim]
                _a = val[dim : dim * 2]
                _a = val[-dim:]
        else:
            # Plain entries: remap the key and copy the value through.
            _a = rename_key(__A)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                _a = val.squeeze_()
            else:
                _a = val

    return orig_state_dict
def lowerCAmelCase ():
    """Download the standard COCO "two cats" test image used to sanity-check the
    converted model.

    Fixed: the URL was bound to ``_a`` while the request referenced undefined names;
    restore the local ``url`` and stream the response into PIL.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase (checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Convert a GroupViT checkpoint to the HF format, verify its logits on a test
    image, save model + processor, and optionally push both to the hub.

    Fixed from the mangled original: all four parameters shared the name ``__A``
    (a SyntaxError) and every intermediate was bound to ``_a``; names restored from
    how each value is used below — the defaults and argparse flags ground the
    parameter names.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    # NOTE(review): the state-dict converter above was renamed by the same mangling
    # pass; `convert_state_dict` is its original name — confirm.
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result on the COCO cats image
    processor = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
    image = prepare_img()
    inputs = processor(text=['''a photo of a cat''', '''a photo of a dog'''], images=image, padding=True, return_tensors='''pt''')

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.35_23, 6.36_29]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.18_73, 8.62_30]])
    else:
        raise ValueError(F'''Model name {model_name} not supported.''')
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print('''Successfully saved processor and model to''', pytorch_dump_folder_path)

    if push_to_hub:
        print('''Pushing to the hub...''')
        processor.push_to_hub(model_name, organization='''nielsr''')
        model.push_to_hub(model_name, organization='''nielsr''')
if __name__ == "__main__":
    # Fixed: the parser and the parsed namespace were bound to `lowercase_`, leaving
    # the `parser`/`args` names used below undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gccy-fcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    # NOTE(review): the conversion entry point above was renamed to `lowerCAmelCase`
    # by the mangling pass; `convert_groupvit_checkpoint` is its original name — confirm.
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 11 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make all torch/cuDNN ops deterministic so pipeline outputs are reproducible in tests.
enable_full_determinism()
class _lowerCAmelCase ( a , a , a , unittest.TestCase ):
    """Fast pipeline tests for ``StableUnCLIPPipeline`` built from tiny
    randomly-initialized components.

    Fixed from the mangled original: every local was bound to ``lowerCAmelCase__``
    while later lines read the real names; ``get_dummy_inputs`` had two parameters
    sharing one name (a SyntaxError); and the four methods all carried the same name
    ``snake_case`` (so only the last survived) — names restored from how the pipeline
    test mixins invoke them.
    """

    # NOTE(review): these class attributes were all renamed to `__magic_name__` and so
    # shadow each other; the original attribute names (pipeline class, params,
    # batch/image params, xformers flag) were lost and must be restored upstream.
    __magic_name__ :int = StableUnCLIPPipeline
    __magic_name__ :int = TEXT_TO_IMAGE_PARAMS
    __magic_name__ :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
    __magic_name__ :Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    __magic_name__ :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    __magic_name__ :List[str] = False

    def get_dummy_components( self ):
        """Build the tiny prior + image-noising + denoising components for the pipeline."""
        embedder_hidden_size = 3_2
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )

        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=embedder_projection_dim , num_layers=1 , )

        torch.manual_seed(0 )
        # NOTE(review): the boolean flags below were lost in the mangling and are
        # restored from the upstream diffusers test — confirm.
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=True , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )

        # regular denoising components

        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )

        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )

        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )

        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='v_prediction' , set_alpha_to_one=False , steps_offset=1 , )

        torch.manual_seed(0 )
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }

        return components

    def get_dummy_inputs( self , device , seed=0 ):
        """Deterministic generator + prompt kwargs for the pipeline under test."""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass( self ):
        # Only compare exact outputs on CPU; GPU kernels are not bitwise stable.
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for the full pretrained StableUnCLIP pipeline.

    Fixed from the mangled original: locals were bound to ``lowerCAmelCase__`` while
    later lines read the real names; the three methods all carried the same name
    ``snake_case`` (shadowing each other — and unittest never calls ``tearDown``
    under another name); and ``torch.floataa`` is not a torch attribute — the
    reference npy filename (``...fp16.npy``) grounds ``torch.float16``.
    """

    def tearDown( self ):
        """Release GPU memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip( self ):
        """Compare pipeline output on a fixed seed against a stored reference image."""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe('anime turle' , generator=generator , output_type='np' )

        image = output.images[0]

        assert image.shape == (7_6_8, 7_6_8, 3)

        assert_mean_pixel_difference(image , expected_image )

    def test_stable_unclip_pipeline_with_sequential_cpu_offload( self ):
        """Check peak GPU memory stays under 7 GB with slicing + sequential offload."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 1_0**9
| 93 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _snake_case ( UpperCAmelCase_ ):
    """Flax BigBird question-answering module extended with a 5-way pooled-category head.

    Fixed from the mangled original: the three fields all carried the same name
    ``__lowerCAmelCase`` (shadowing each other) and the setup method lost its name.
    ``dtype`` is grounded by the ``self.dtype`` read below and ``setup`` by the
    ``super().setup()`` call; the other field names and the float width are restored
    from convention — confirm upstream (``jnp.floataa`` is not a jnp attribute).
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32  # NOTE(review): was `jnp.floataa` — confirm float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # Extra head predicting one of 5 answer categories from the pooled output.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        """Run the base QA module, then append the category logits to its outputs."""
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class _snake_case ( UpperCAmelCase_ ):
    # NOTE(review): `FlaxBigBirdForNaturalQuestionsModule` is not defined under that
    # name in this file — the module class above was renamed `_snake_case` by the
    # same mangling pass — so this reference is unresolved as written. This attribute
    # plugs the custom module into the pretrained-model wrapper class.
    __lowerCAmelCase : Optional[int] = FlaxBigBirdForNaturalQuestionsModule
def UpperCamelCase ( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ):
    """Mean cross-entropy averaged over the start-token, end-token and pooled-category heads.

    Fixed: all six parameters shared one name (a SyntaxError). The logits/labels
    pairing is grounded by the three cross-entropy calls below; the exact positional
    order of the six parameters is an assumption — confirm against callers.
    """
    def cross_entropy(logits , labels , reduction=None ):
        """Per-example CE between integer labels and logits over the last axis."""
        vocab_size = logits.shape[-1]
        # One-hot encode the integer labels against the logits' class axis.
        onehot = (labels[..., None] == jnp.arange(vocab_size )[None]).astype("""f4""" )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(onehot * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss

    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class _snake_case :
    """Training hyper-parameters for BigBird natural-questions fine-tuning.

    NOTE(review): every field below was renamed to the same identifier by an
    automated pass, so the later declarations shadow the earlier ones and the
    original hyper-parameter names (model id, logging/save steps, batch size, the
    optimizer args under the ``tx_args`` comment, save/base dirs, data paths) are
    lost. The method reads ``self.base_dir``, ``self.save_dir`` and
    ``self.batch_size_per_device``, so at least those field names must be restored
    from the upstream script before this dataclass is usable.
    """
    __lowerCAmelCase : str = "google/bigbird-roberta-base"
    __lowerCAmelCase : int = 3_000
    __lowerCAmelCase : int = 10_500
    __lowerCAmelCase : int = 128
    __lowerCAmelCase : int = 3
    __lowerCAmelCase : int = 1
    __lowerCAmelCase : int = 5

    # tx_args
    __lowerCAmelCase : float = 3e-5
    __lowerCAmelCase : float = 0.0
    __lowerCAmelCase : int = 20_000
    __lowerCAmelCase : float = 0.0_095

    __lowerCAmelCase : str = "bigbird-roberta-natural-questions"
    __lowerCAmelCase : str = "training-expt"
    __lowerCAmelCase : str = "data/nq-training.jsonl"
    __lowerCAmelCase : str = "data/nq-validation.jsonl"

    def lowercase__ ( self):
        """Create the experiment directory and derive the effective global batch size.

        NOTE(review): `SCREAMING_SNAKE_CASE_` is unresolved (presumably
        ``exist_ok=True``) and the two computed values below are bound to throwaway
        names instead of ``self.save_dir`` / ``self.batch_size`` — confirm upstream.
        """
        os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_)
        lowercase__ : Any = os.path.join(self.base_dir , self.save_dir)
        lowercase__ : str = self.batch_size_per_device * jax.device_count()
@dataclass
class _snake_case :
__lowerCAmelCase : int
__lowerCAmelCase : int = 4_096 # no dynamic padding on TPUs
def __call__( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Dict = self.collate_fn(SCREAMING_SNAKE_CASE_)
lowercase__ : List[Any] = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return batch
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ , lowercase__ : str = self.fetch_inputs(features["""input_ids"""])
lowercase__ : str = {
"""input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa),
"""attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa),
}
return batch
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : List[Any] = [self._fetch_inputs(SCREAMING_SNAKE_CASE_) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = [1 for _ in range(len(SCREAMING_SNAKE_CASE_))]
while len(SCREAMING_SNAKE_CASE_) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def UpperCamelCase ( dataset , batch_size , seed=None ):
    """Yield successive ``batch_size``-sized dict batches from ``dataset``, optionally
    shuffling first.

    Fixed: the three parameters shared one name (a SyntaxError); names restored from
    how they are used in the body. The trailing partial batch is dropped, matching
    the original ``len(dataset) // batch_size`` loop bound.
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name="""batch""" )
def UpperCamelCase ( state , drp_rng , **model_inputs ):
    """One pmapped training step: forward, cross-replica mean loss/grads, optimizer update.

    Fixed: the positional parameters shared one name (a SyntaxError) and every
    intermediate was bound to a throwaway name; names restored from how the values
    are used. Returns (new_state, metrics, new_dropout_rng).
    """
    def loss_fn(params ):
        start_labels = model_inputs.pop("""start_labels""" )
        end_labels = model_inputs.pop("""end_labels""" )
        pooled_labels = model_inputs.pop("""pooled_labels""" )

        # NOTE(review): `train=True` and the loss-argument order are restored from
        # the upstream script — confirm.
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits , end_logits , pooled_logits = outputs

        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )

    drp_rng , new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss , grads = grad_fn(state.params )
    metrics = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    grads = jax.lax.pmean(grads , """batch""" )

    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def UpperCamelCase ( state , **model_inputs ):
    """One pmapped evaluation step: forward pass and cross-replica mean loss.

    Fixed: parameters and locals had been renamed onto reused identifiers; names
    restored from use. ``train=False`` and the loss-argument order are restored from
    the upstream script — confirm.
    """
    start_labels = model_inputs.pop("""start_labels""" )
    end_labels = model_inputs.pop("""end_labels""" )
    pooled_labels = model_inputs.pop("""pooled_labels""" )

    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits , end_logits , pooled_logits = outputs

    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    return metrics
class _snake_case ( train_state.TrainState ):
    # Carry the QA loss function on the train state; it is a plain callable, not a
    # pytree leaf, hence pytree_node=False.
    # Fixed: the field name is read elsewhere as ``state.loss_fn`` and
    # ``struct.field`` was being passed an undefined name as its flag.
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class _snake_case :
    """Training driver: holds args, collator, pmapped step functions and a wandb logger,
    and runs the epoch/eval/checkpoint loop.

    NOTE(review): this class was mangled by an automated rename and is not runnable
    as written — the seven fields all share the name ``__lowerCAmelCase`` (so only the
    last survives), all four methods share the name ``lowercase__`` (so only the last
    survives), and locals are bound to ``lowercase__`` while later lines read the
    original names. The reads below (``self.args``, ``self.data_collator``,
    ``self.train_step_fn``, ``self.val_step_fn``, ``self.model_save_fn``,
    ``self.logger``, ``self.scheduler_fn``, ``self.evaluate``,
    ``self.save_checkpoint``) document what the lost names were.
    """
    __lowerCAmelCase : Args
    __lowerCAmelCase : Callable
    __lowerCAmelCase : Callable
    __lowerCAmelCase : Callable
    __lowerCAmelCase : Callable
    __lowerCAmelCase : wandb
    __lowerCAmelCase : Callable = None

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
        """Create (or restore from ``ckpt_dir``) the replicated TrainState.

        NOTE(review): reads like ``model.params`` / ``ckpt_dir`` refer to the
        original parameter names, which were collapsed to ``SCREAMING_SNAKE_CASE_``.
        """
        lowercase__ : List[str] = model.params
        lowercase__ : Dict = TrainState.create(
            apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , )
        if ckpt_dir is not None:
            # Rebuild optimizer + state from a saved checkpoint.
            lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
            lowercase__ : str = {
                """lr""": args.lr,
                """init_lr""": args.init_lr,
                """warmup_steps""": args.warmup_steps,
                """num_train_steps""": num_train_steps,
                """weight_decay""": args.weight_decay,
            }
            lowercase__ , lowercase__ : Any = build_tx(**SCREAMING_SNAKE_CASE_)
            lowercase__ : List[str] = train_state.TrainState(
                step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , )
            lowercase__ : Optional[Any] = args
            lowercase__ : Union[str, Any] = data_collator
            lowercase__ : str = lr
            lowercase__ : Union[str, Any] = params
        lowercase__ : Dict = jax_utils.replicate(SCREAMING_SNAKE_CASE_)
        return state

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
        """Run the training loop: per-epoch batching, periodic eval/logging and saving."""
        lowercase__ : Tuple = self.args
        lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE_) // args.batch_size
        lowercase__ : int = jax.random.PRNGKey(0)
        lowercase__ : Union[str, Any] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count())
        for epoch in range(args.max_epochs):
            lowercase__ : Tuple = jnp.array(0 , dtype=jnp.floataa)
            lowercase__ : List[str] = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_)
            lowercase__ : List[str] = 0
            for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=f'Running EPOCH-{epoch}'):
                lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
                lowercase__ , lowercase__ , lowercase__ : List[Any] = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
                running_loss += jax_utils.unreplicate(metrics["""loss"""])
                i += 1
                if i % args.logging_steps == 0:
                    # Periodic eval + wandb logging.
                    lowercase__ : List[str] = jax_utils.unreplicate(state.step)
                    lowercase__ : str = running_loss.item() / i
                    lowercase__ : Tuple = self.scheduler_fn(state_step - 1)
                    lowercase__ : Tuple = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
                    lowercase__ : List[Any] = {
                        """step""": state_step.item(),
                        """eval_loss""": eval_loss.item(),
                        """tr_loss""": tr_loss,
                        """lr""": lr.item(),
                    }
                    tqdm.write(str(SCREAMING_SNAKE_CASE_))
                    self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=SCREAMING_SNAKE_CASE_)

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
        """Average the validation loss over all full batches of the given dataset."""
        lowercase__ : Dict = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size)
        lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE_) // self.args.batch_size
        lowercase__ : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa)
        lowercase__ : Optional[Any] = 0
        for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """):
            lowercase__ : Tuple = self.data_collator(SCREAMING_SNAKE_CASE_)
            lowercase__ : List[Any] = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
            running_loss += jax_utils.unreplicate(metrics["""loss"""])
            i += 1
        return running_loss / i

    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
        """Save model weights, optimizer state, args, collator and the step counter."""
        lowercase__ : Tuple = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_)
        print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """)
        self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params)
        with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""") , """wb""") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib"""))
        joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib"""))
        with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""") , """w""") as f:
            json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_)
        print("""DONE""")
def UpperCamelCase ( save_dir , state ):
    """Restore model params, optimizer state, training args and the data collator
    from ``save_dir``.

    Fixed: both parameters shared one name (a SyntaxError) and every loaded value
    was bound to a throwaway name; names restored from the f-string, the attribute
    reads and the return statement.

    Returns:
        (params, opt_state, step, args, data_collator)
    """
    print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... """ )
    with open(os.path.join(save_dir , """flax_model.msgpack""" ) , """rb""" ) as f:
        params = from_bytes(state.params , f.read() )

    with open(os.path.join(save_dir , """opt_state.msgpack""" ) , """rb""" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )

    args = joblib.load(os.path.join(save_dir , """args.joblib""" ) )
    data_collator = joblib.load(os.path.join(save_dir , """data_collator.joblib""" ) )

    with open(os.path.join(save_dir , """training_state.json""" ) , """r""" ) as f:
        training_state = json.load(f )
    step = training_state["""step"""]

    print("""DONE""" )
    return params, opt_state, step, args, data_collator
def UpperCamelCase ( lr , init_lr , warmup_steps , num_train_steps ):
    """Build a learning-rate schedule: linear warmup from ``init_lr`` to ``lr``, then
    linear decay towards ~0 over the remaining steps.

    Fixed: the four parameters shared one name (a SyntaxError). The warmup/decay
    roles are restored from the two schedule shapes; the exact positional order of
    the parameters is an assumption — confirm against callers.
    """
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1E-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer and its learning-rate schedule.

    Weight decay is masked out for bias and LayerNorm scale parameters.

    Returns:
        (tx, lr): the optax transformation and the schedule function.
    """

    def weight_decay_mask(params):
        # flatten_dict keys are tuples of path components; decay everything
        # except biases and LayerNorm scales. (The obfuscated original tested
        # the *values* instead of the key paths, which is meaningless.)
        flat = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in flat}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 12 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Restored names: the obfuscated original bound all three to `__A`, leaving
# `logger` (used in main) and `MODEL_CONFIG_CLASSES` (used on the next line)
# undefined at import time.
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune,
    or train from scratch. Field names restored to match the attribute accesses in
    main() (model_name_or_path, model_type, config_name, tokenizer_name, cache_dir).
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={
            "help": "If training from scratch, pass a model type from the list: "
            + ", ".join(conf.model_type for conf in MODEL_WITH_LM_HEAD_MAPPING.keys())
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names restored to match the attribute accesses in get_dataset() and main()
    (the obfuscated original declared every field as `__magic_name__` with an
    undefined default `a`).
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by `args`.

    Bug fixed: the glob branch called `_dataset` with the wrong argument
    instead of each globbed file `f`.
    """

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        # Each globbed shard becomes its own dataset; concatenate them.
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """Run language-model training/evaluation.

    Local variable names restored from the obfuscated original so the
    references that follow each assignment (model_args, config, tokenizer,
    model, trainer, ...) actually resolve; `training_args.fpaa` corrected to
    the real TrainingArguments attribute `fp16`.
    """
    # See all possible arguments by passing --help to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.' )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.' )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name' )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).' )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))
        results.update(result)
    return results
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs); `index` is the spawned process index."""
    main()


if __name__ == "__main__":
    main()
| 93 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the Gamma function Γ(num) = ∫₀^∞ x^(num-1) e^(-x) dx via numerical quadrature.

    Raises:
        ValueError: if num <= 0 (outside the integral's domain).

    The obfuscated original gave both functions the same name (the second
    shadowed the first) and passed scrambled arguments to quad; restored here.
    """
    if num <= 0:
        raise ValueError('math domain error' )
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    """Integrand of the Gamma function: x^(z-1) * e^(-x)."""
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
    # Run module doctests when executed as a script (a no-op unless docstrings
    # above contain `>>>` examples).
    from doctest import testmod

    testmod()
| 13 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output container for the temporal transformer model.

    Renamed from the obfuscated placeholder: the forward pass below constructs
    `TransformerTemporalModelOutput(sample=...)`, so both the class name and
    the `sample` field are required by that call site.
    """

    # Transformed hidden states with the same shape as the model input.
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Transformer applied along the temporal (frame) axis of video-like latents.

    Parameter and local names restored: the obfuscated original declared every
    __init__/forward parameter with the same identifier, which is a SyntaxError,
    and inherited from the same undefined base twice. Bases are the ModelMixin
    and ConfigMixin imported at the top of the file.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1E-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Apply the temporal transformer and return the residual-added result.

        hidden_states is assumed to be (batch * num_frames, channel, height,
        width) — TODO confirm against callers; the reshapes below encode that
        layout.
        """
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        # Group frames per batch item, then move channels ahead of frames.
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        # Flatten spatial positions into the batch axis so attention runs over frames.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
| 93 | 0 |
import heapq
import sys
import numpy as np
# Grid position type alias; the function annotations below reference `TPos`,
# so the alias must carry that name (the obfuscated original bound it to a__).
TPos = tuple[int, int]
class PriorityQueue:
    """Min-heap priority queue with membership tracking and priority updates.

    Method names (minkey/empty/put/remove_element/top_show/get) restored to
    match the calls made by multi_a_star below; the obfuscated `put` declared
    two parameters with the same name, a SyntaxError.
    """

    def __init__(self):
        self.elements = []   # heap of (priority, item)
        self.set = set()     # items currently queued, for O(1) membership

    def minkey(self):
        """Smallest priority currently queued, or +inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('''inf''' )

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert `item`, or re-prioritize it if already queued."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop until the stale entry surfaces, drop it, re-push the
            # rest together with the new (priority, item) pair.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Remove `item` from the queue if present (linear pop/re-push)."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Item with the smallest priority, without removing it."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the (priority, item) pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def __UpperCAmelCase ( __a : TPos ,__a : TPos ) -> Optional[Any]:
"""simple docstring"""
_a : int = np.array(__a )
_a : int = np.array(__a )
return np.linalg.norm(a - b )
def heuristic_1(p: TPos, goal: TPos):
    """Inadmissible heuristic: Euclidean distance integer-divided by the global counter t."""
    return consistent_heuristic(p, goal) // t
def __UpperCAmelCase ( __a : TPos ,__a : TPos ) -> str:
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    """Priority key for queue i: g(start) + Wa * h_i(start, goal)."""
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Render the grid with the found path, print the path, and exit the process.

    Grid convention: cell (j, (n-1)-i) maps to row i / column j so that the
    start sits bottom-left and the goal top-right.
    """
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = '''*'''

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''

    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=''' ''' )
                print('''<-- End position''', end=''' ''' )
            else:
                print(grid[i][j], end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )
    print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    x = back_pointer[goal]
    while x != start:
        print(x, end=''' ''' )
        x = back_pointer[x]
    print(x)
    # Terminates the whole program once a path is reported.
    sys.exit()
def valid(p: TPos):
    """Return True when position `p` lies inside the n x n grid."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    """Expand state `s` popped from queue `j`: relax its four neighbours.

    Newly discovered neighbours are initialised with g = inf; improved
    neighbours are queued on the anchor queue and, when their inadmissible key
    stays within Wa of the anchor key, on the inadmissible queues too.
    """
    # s is being expanded, so drop it from every open list.
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('''inf''' )

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[var].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    """Build the alternative obstacle layout (several rectangular regions plus an L block)."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
# Heuristic lookup used by key(); index 0 must be the consistent (anchor) one.
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

# Vertical wall obstacle layout used for the demo run.
blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
Wa = 1  # heuristic weight (the original's W1 and W2, both 1, collapsed to one name used below)
n = 20  # grid is n x n
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1  # global counter incremented by multi_a_star
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    """Multi-heuristic A*: one anchor (consistent) queue plus inadmissible queues.

    Expands from whichever queue currently looks most promising; prints the
    path (via do_something, which exits) or the grid when no path exists.
    """
    g_function = {start: 0, goal: float('''inf''' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer, goal, start)
                else:
                    # NOTE(review): top_show() returns the queued item itself.
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print('''No path found to goal''' )
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print('''#''', end=''' ''' )
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print('''*''', end=''' ''' )
                else:
                    print('''-''', end=''' ''' )
            else:
                print('''*''', end=''' ''' )
            if (j, i) == (n - 1, n - 1):
                print('''<-- End position''', end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 14 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used by the DetaImageProcessor tests and
    computes the expected output resolution for resized inputs.

    Class name restored: the test class below instantiates
    `DetaImageProcessingTester(self)`. The obfuscated __init__ declared every
    parameter under a single repeated name (a SyntaxError).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Matches DetaImageProcessor's default shortest/longest edge.
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs dict for constructing the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing; for batched
        inputs, the per-image maxima (i.e. the padded batch resolution)."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Any = DetaImageProcessor if is_vision_available() else None
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = DetaImageProcessingTester(self )
@property
def snake_case ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_rescale' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_pad' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
lowerCAmelCase__ :Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
lowerCAmelCase__ :List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ :Tuple = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase__ :Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase__ :str = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCAmelCase__ :Dict = json.loads(f.read() )
lowerCAmelCase__ :int = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
lowerCAmelCase__ :int = DetaImageProcessor()
lowerCAmelCase__ :List[Any] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='pt' )
# verify pixel values
lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase__ :Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
# verify boxes
lowerCAmelCase__ :Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
lowerCAmelCase__ :Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ :Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
# verify is_crowd
lowerCAmelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
# verify class_labels
lowerCAmelCase__ :Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
# verify orig_size
lowerCAmelCase__ :str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
# verify size
lowerCAmelCase__ :Any = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCAmelCase__ :Dict = json.loads(f.read() )
lowerCAmelCase__ :Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
lowerCAmelCase__ :Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCAmelCase__ :Dict = DetaImageProcessor(format='coco_panoptic' )
lowerCAmelCase__ :Optional[int] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='pt' )
# verify pixel values
lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
# verify area
lowerCAmelCase__ :Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
# verify boxes
lowerCAmelCase__ :int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ :Optional[int] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
# verify is_crowd
lowerCAmelCase__ :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
# verify class_labels
lowerCAmelCase__ :List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
# verify masks
lowerCAmelCase__ :Optional[int] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCAmelCase )
# verify orig_size
lowerCAmelCase__ :Optional[int] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
# verify size
lowerCAmelCase__ :Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
| 93 | 0 |
def naive_cut_rod_recursive(n: int, prices: list) -> float:
    """Exponential-time recursive solution to the rod-cutting problem.

    Args:
        n: rod length.
        prices: ``prices[i - 1]`` is the price of a rod piece of length ``i``.

    Returns:
        The maximum obtainable revenue (0 for a rod of length 0).

    Raises:
        ValueError: if ``n`` is negative or exceeds ``len(prices)``.
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        # Try every first-cut length i and recurse on the remaining rod.
        max_revenue = max(max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revenue


def top_down_cut_rod(n: int, prices: list) -> float:
    """Memoized (top-down dynamic-programming) solution to rod cutting.

    Same contract as :func:`naive_cut_rod_recursive`, but each subproblem is
    solved at most once via the ``max_rev`` cache.
    """
    _enforce_args(n, prices)
    # max_rev[i] caches the best revenue for a rod of length i; -inf == unsolved.
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list) -> float:
    """Recursive helper for :func:`top_down_cut_rod`; fills the ``max_rev`` cache."""
    if max_rev[n] >= 0:
        # Already computed (revenues are non-negative, so >= 0 means "cached").
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev)
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list) -> float:
    """Iterative (bottom-up dynamic-programming) solution to rod cutting.

    Same contract as :func:`naive_cut_rod_recursive`; runs in O(n^2).
    """
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list) -> None:
    """Validate rod length ``n`` against ``prices``.

    Raises:
        ValueError: if ``n`` is negative or if ``prices`` has no entry for a
            piece of every length up to ``n``.
    """
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    """Sanity-check that all three implementations agree on a known instance."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 15 |
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary digits (no prefix).

    >>> binary_recursive(40)
    '101000'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    # Higher-order digits first, current remainder last.
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert a decimal string (optionally negative) to a '0b'-prefixed binary string.

    Raises:
        ValueError: if the input is empty or not an integer.
    """
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return F"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 93 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : List[str] = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _SCREAMING_SNAKE_CASE(__snake_case):
    """Configuration for a BlenderbotSmall-style encoder-decoder model.

    Stores the hyperparameters of the encoder and decoder stacks and forwards
    the generation-related token ids to the base ``PretrainedConfig`` API.

    NOTE(review): the class and base names look machine-mangled; upstream this
    is ``BlenderbotSmallConfig(PretrainedConfig)`` — confirm against the
    original file.
    """

    # Names below are required by the PretrainedConfig machinery.
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class _SCREAMING_SNAKE_CASE(__snake_case):
    """ONNX export configuration for BlenderbotSmall seq2seq models.

    Declares the dynamic input/output axes per task and builds dummy inputs
    (including ``past_key_values`` tensors) for tracing.

    NOTE(review): the class/base names look machine-mangled; upstream this is
    ``BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast)`` — confirm against
    the original file. Member names below are restored from the base-class
    contract and the call sites inside this class.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Map each model input name to its dynamic axes, depending on the task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a cache, the decoder only sees the newest token.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Map each model output name to its dynamic axes, adding `present.*` when caching."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build encoder + decoder dummy inputs (and zeroed past key values if needed)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask so it also covers the zeroed past positions.
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs, with zeroed past key values if needed."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a fixed-size dummy batch (no past key values)."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation to the task-specific helper."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten past key values per-task; the seq2seq base handles cross-attention pairs."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule name to the public symbols it exports.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the PyTorch modeling classes when torch is installed.
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    # Real imports only for type checkers; at runtime the lazy module resolves them.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return the two roots of ``a*x**2 + b*x + c = 0``.

    Purely real roots are returned as plain floats; otherwise complex numbers
    are returned (``cmath.sqrt`` handles a negative discriminant).

    Raises:
        ValueError: if ``a`` is zero (the equation is not quadratic).
    """
    if a == 0:
        raise ValueError("""Coefficient 'a' must not be zero.""")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    # Drop the zero imaginary part when a root is purely real.
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    """Demo: print the roots of 5x^2 + 6x + 1."""
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"""The solutions are: {solution1} and {solution2}""")


if __name__ == "__main__":
    main()
| 17 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A = logging.get_logger(__name__)
__A = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class _lowerCAmelCase( a ):
    """Model configuration for DeBERTa-v2.

    Holds the hyperparameters of the DeBERTa-v2 encoder (sizes, dropouts,
    relative-attention settings) plus the classification-pooler options.

    NOTE(review): the class and base names look machine-mangled; upstream this
    is ``DebertaV2Config(PretrainedConfig)`` — confirm against the original file.
    """

    # Required by the PretrainedConfig machinery.
    model_type = 'deberta-v2'

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act='gelu',
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility: accept a '|'-separated string for pos_att_type.
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Pooler defaults to the encoder hidden size unless overridden via kwargs.
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class _lowerCAmelCase( a ):
    """ONNX export configuration for DeBERTa-v2 models.

    NOTE(review): class/base/member names appear machine-mangled; upstream this
    is ``DebertaV2OnnxConfig(OnnxConfig)``. Member names are restored from the
    ``OnnxConfig`` contract — confirm against the original file.
    """

    @property
    def inputs(self):
        """Map each model input name to its dynamic axes for the current task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        # token_type_ids only exists when the config declares token types.
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)]
            )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self):
        """Minimum ONNX opset version required for export."""
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
        tokenizer=None,
    ):
        """Build dummy inputs via the base class, dropping token_type_ids when unused."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 93 | 0 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_SCREAMING_SNAKE_CASE = imread(r"digital_image_processing/image_data/lena_small.jpg")
_SCREAMING_SNAKE_CASE = cvtColor(img, COLOR_BGR2GRAY)
def __a():
'''simple docstring'''
_lowerCAmelCase = cn.convert_to_negative(SCREAMING_SNAKE_CASE_ )
# assert negative_img array for at least one True
assert negative_img.any()
def __a():
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(SCREAMING_SNAKE_CASE_ , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def __a():
'''simple docstring'''
_lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def __a():
'''simple docstring'''
_lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCAmelCase = canny.canny(SCREAMING_SNAKE_CASE_ )
# assert canny array for at least one True
assert canny_array.any()
def __a():
'''simple docstring'''
assert gg.gaussian_filter(SCREAMING_SNAKE_CASE_ , 5 , sigma=0.9 ).all()
def __a():
'''simple docstring'''
_lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_lowerCAmelCase = conv.img_convolve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
assert res.any()
def __a():
'''simple docstring'''
assert med.median_filter(SCREAMING_SNAKE_CASE_ , 3 ).any()
def __a():
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase = sob.sobel_filter(SCREAMING_SNAKE_CASE_ )
assert grad.any() and theta.any()
def __a():
'''simple docstring'''
_lowerCAmelCase = sp.make_sepia(SCREAMING_SNAKE_CASE_ , 20 )
assert sepia.all()
def __a(SCREAMING_SNAKE_CASE_ : str = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
_lowerCAmelCase = bs.Burkes(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def __a(SCREAMING_SNAKE_CASE_ : str = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
_lowerCAmelCase = rs.NearestNeighbour(imread(SCREAMING_SNAKE_CASE_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def __a():
'''simple docstring'''
_lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
_lowerCAmelCase = imread(SCREAMING_SNAKE_CASE_ , 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = image[x_coordinate][y_coordinate]
_lowerCAmelCase = lbp.get_neighbors_pixel(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_lowerCAmelCase = lbp.local_binary_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert lbp_image.any()
| 18 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule name to the public symbols it exports.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the PyTorch modeling classes when torch is installed.
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports only for type checkers; at runtime the lazy module resolves them.
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a = """\
"""
_a = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_a = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase( datasets.Metric ):
    """Perplexity metric.

    Scores each input text with a causal language model by the exponentiated,
    length-normalised token-level cross-entropy, and also reports the mean
    over all texts.

    NOTE(review): local variable names in this class appear mangled (every
    assignment targets `_UpperCamelCase`, both methods are named
    `UpperCAmelCase`, and all `_compute` parameters are named `__a`), so later
    references such as `device`, `model`, `tokenizer`, `existing_special_tokens`,
    `encoded_texts`, `attn_masks`, `ppls` do not resolve as written — restore
    from the upstream implementation before use.
    """

    def UpperCAmelCase ( self) -> Any:
        """Return the metric metadata; the only input feature is the raw strings."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''input_texts''': datasets.Value('''string'''),
                }) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )

    def UpperCAmelCase ( self , __a , __a , __a = 16 , __a = True , __a=None) -> Dict:
        """Compute per-text and mean perplexity.

        Upstream parameters (in order): model_id, input_texts, batch_size=16,
        add_start_token=True, device=None.
        """
        # Resolve the execution device; "gpu" is accepted as an alias for CUDA.
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                _UpperCamelCase = '''cuda'''
        else:
            _UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        _UpperCamelCase = AutoModelForCausalLM.from_pretrained(__a)
        _UpperCamelCase = model.to(__a)
        _UpperCamelCase = AutoTokenizer.from_pretrained(__a)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            _UpperCamelCase = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(__a) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            _UpperCamelCase = model.config.max_length - 1
        else:
            _UpperCamelCase = model.config.max_length
        # Tokenize everything up-front, keeping the attention mask so padding is
        # excluded from the loss normalisation below.
        _UpperCamelCase = tokenizer(
            __a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , return_tensors='''pt''' , return_attention_mask=__a , ).to(__a)
        _UpperCamelCase = encodings['''input_ids''']
        _UpperCamelCase = encodings['''attention_mask''']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        _UpperCamelCase = []
        # Per-token loss (reduction='none') so we can mask and average per text.
        _UpperCamelCase = CrossEntropyLoss(reduction='''none''')
        for start_index in logging.tqdm(range(0 , len(__a) , __a)):
            _UpperCamelCase = min(start_index + batch_size , len(__a))
            _UpperCamelCase = encoded_texts[start_index:end_index]
            _UpperCamelCase = attn_masks[start_index:end_index]
            if add_start_token:
                # Prepend a BOS token to every row of the batch (and extend the mask).
                # NOTE(review): `torch.intaa` looks digit-mangled (presumably
                # torch.int64) — TODO confirm against upstream.
                _UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(__a)
                _UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1)
                _UpperCamelCase = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa).to(__a), attn_mask] , dim=1)
            _UpperCamelCase = encoded_batch
            with torch.no_grad():
                _UpperCamelCase = model(__a , attention_mask=__a).logits
            # Shift logits/labels so position t predicts token t+1.
            _UpperCamelCase = out_logits[..., :-1, :].contiguous()
            _UpperCamelCase = labels[..., 1:].contiguous()
            _UpperCamelCase = attn_mask[..., 1:].contiguous()
            # NOTE(review): `torch.expa` looks digit-mangled (presumably
            # torch.exp2 in the upstream metric) — TODO confirm.
            _UpperCamelCase = torch.expa(
                (loss_fct(shift_logits.transpose(1 , 2) , __a) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(__a)}
| 19 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazily-populated import structure for this package: the MLuke tokenizer is
# only exposed when sentencepiece is installed.  (The original assigned the
# dict, the token list, and the lazy module all to `__A`, leaving
# `_import_structure` undefined at the bottom of the file — a NameError.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: leave the structure empty so importing the
    # package itself still succeeds.
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    # Static type checkers resolve the real import eagerly.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase_ :
    """Fixture that builds a small ConvBERT configuration plus dummy inputs and
    runs shape checks for each TF model head.

    NOTE(review): every ``__init__`` parameter is named ``lowercase_`` (mangled)
    and the constructor ignores them, hard-coding the values below; local
    assignments all target ``a__``, so later attribute reads
    (``self.batch_size`` etc.) do not resolve as written — restore the upstream
    attribute names before use.
    """

    def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Any:
        # Hard-coded tester configuration (parameters above are ignored).
        a__ =parent
        a__ =13
        a__ =7
        a__ =True
        a__ =True
        a__ =True
        a__ =True
        a__ =99
        a__ =384
        a__ =2
        a__ =4
        a__ =37
        a__ ='gelu'
        a__ =0.1
        a__ =0.1
        a__ =512
        a__ =16
        a__ =2
        a__ =0.02
        a__ =3
        a__ =4
        a__ =128
        a__ =2
        a__ =9
        a__ =1
        a__ =None

    def __UpperCamelCase ( self) -> Dict:
        """Build a config and a full set of dummy inputs/labels."""
        a__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a__ =None
        if self.use_input_mask:
            a__ =random_attention_mask([self.batch_size, self.seq_length])
        a__ =None
        if self.use_token_type_ids:
            a__ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        a__ =None
        a__ =None
        a__ =None
        if self.use_labels:
            a__ =ids_tensor([self.batch_size] , self.type_sequence_label_size)
            a__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            a__ =ids_tensor([self.batch_size] , self.num_choices)
        a__ =ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase_ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
        """Check the base model's last_hidden_state shape (dict and list inputs)."""
        a__ =TFConvBertModel(config=lowercase_)
        a__ ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        a__ =[input_ids, input_mask]
        a__ =model(lowercase_)
        a__ =model(lowercase_)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
        """Check the masked-LM head's logits shape."""
        a__ =TFConvBertForMaskedLM(config=lowercase_)
        a__ ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        a__ =model(lowercase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any:
        """Check the sequence-classification head's logits shape."""
        a__ =self.num_labels
        a__ =TFConvBertForSequenceClassification(config=lowercase_)
        a__ ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        a__ =model(lowercase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]:
        """Check the multiple-choice head: inputs are tiled per choice."""
        a__ =self.num_choices
        a__ =TFConvBertForMultipleChoice(config=lowercase_)
        a__ =tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
        a__ =tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
        a__ =tf.tile(tf.expand_dims(lowercase_ , 1) , (1, self.num_choices, 1))
        a__ ={
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        a__ =model(lowercase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[str]:
        """Check the token-classification head's logits shape."""
        a__ =self.num_labels
        a__ =TFConvBertForTokenClassification(config=lowercase_)
        a__ ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        a__ =model(lowercase_)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[str]:
        """Check the question-answering head's start/end logits shapes."""
        a__ =TFConvBertForQuestionAnswering(config=lowercase_)
        a__ ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        a__ =model(lowercase_)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def __UpperCamelCase ( self) -> Optional[Any]:
        """Repackage prepare_config_and_inputs() into (config, inputs_dict)."""
        a__ =self.prepare_config_and_inputs()
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) =config_and_inputs
        a__ ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class lowercase_ (lowercase__ , lowercase__ , unittest.TestCase ):
    """Common-test suite for the TF ConvBERT model family.

    NOTE(review): the two mixin bases are both named ``lowercase__`` (mangled;
    presumably TFModelTesterMixin and PipelineTesterMixin — confirm against
    upstream), all class attributes are named ``snake_case`` so later
    assignments shadow earlier ones, and local assignments target ``a__`` —
    subsequent references do not resolve as written.
    """

    # All model classes / pipeline mapping exercised by the common tests.
    snake_case =(
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    snake_case =(
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    snake_case =False
    snake_case =False
    snake_case =False

    def __UpperCamelCase ( self) -> str:
        # Build shared fixtures: the model tester and the config tester.
        a__ =TFConvBertModelTester(self)
        a__ =ConfigTester(self , config_class=lowercase_ , hidden_size=37)

    def __UpperCamelCase ( self) -> int:
        self.config_tester.run_common_tests()

    def __UpperCamelCase ( self) -> List[Any]:
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_)

    def __UpperCamelCase ( self) -> Optional[int]:
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase_)

    def __UpperCamelCase ( self) -> int:
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowercase_)

    def __UpperCamelCase ( self) -> Tuple:
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase_)

    def __UpperCamelCase ( self) -> Union[str, Any]:
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase_)

    def __UpperCamelCase ( self) -> List[str]:
        a__ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase_)

    @slow
    def __UpperCamelCase ( self) -> Tuple:
        """Round-trip each model through SavedModel export and verify that the
        reloaded model still emits hidden states and attentions of the expected
        shapes."""
        a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
        a__ =True
        a__ =True
        if hasattr(lowercase_ , 'use_cache'):
            a__ =True
        a__ =getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        a__ =getattr(self.model_tester , 'key_length' , lowercase_)
        for model_class in self.all_model_classes:
            a__ =self._prepare_for_class(lowercase_ , lowercase_)
            a__ =model_class(lowercase_)
            a__ =len(model(lowercase_))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowercase_ , saved_model=lowercase_)
                a__ =os.path.join(lowercase_ , 'saved_model' , '1')
                a__ =tf.keras.models.load_model(lowercase_)
                a__ =model(lowercase_)
                if self.is_encoder_decoder:
                    a__ =outputs['encoder_hidden_states']
                    a__ =outputs['encoder_attentions']
                else:
                    a__ =outputs['hidden_states']
                    a__ =outputs['attentions']
                self.assertEqual(len(lowercase_) , lowercase_)
                a__ =getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(lowercase_) , lowercase_)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                # ConvBERT groups attention heads, so the attention tensor has
                # num_attention_heads / 2 head slots.
                self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def __UpperCamelCase ( self) -> List[Any]:
        # Smoke test: the public checkpoint loads.
        a__ =TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(lowercase_)

    def __UpperCamelCase ( self) -> Optional[int]:
        """Verify attention outputs: correct shapes, and that enabling
        output_attentions via call kwargs or via the config both work."""
        a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
        a__ =True
        a__ =getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length)
        a__ =getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length)
        a__ =getattr(self.model_tester , 'key_length' , lowercase_)
        a__ =getattr(self.model_tester , 'key_length' , lowercase_)

        def check_decoder_attentions_output(lowercase_):
            # Decoder attention shape check (encoder-decoder models only).
            a__ =len(lowercase_)
            self.assertEqual(out_len % 2 , 0)
            a__ =outputs.decoder_attentions
            self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(lowercase_):
            # Encoder/self attention shape check.
            a__ =[
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            a__ =True
            a__ =False
            a__ =model_class(lowercase_)
            a__ =model(self._prepare_for_class(lowercase_ , lowercase_))
            a__ =len(lowercase_)
            self.assertEqual(config.output_hidden_states , lowercase_)
            check_encoder_attentions_output(lowercase_)
            if self.is_encoder_decoder:
                a__ =model_class(lowercase_)
                a__ =model(self._prepare_for_class(lowercase_ , lowercase_))
                self.assertEqual(config.output_hidden_states , lowercase_)
                check_decoder_attentions_output(lowercase_)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            a__ =True
            a__ =model_class(lowercase_)
            a__ =model(self._prepare_for_class(lowercase_ , lowercase_))
            self.assertEqual(config.output_hidden_states , lowercase_)
            check_encoder_attentions_output(lowercase_)
            # Check attention is always last and order is fine
            a__ =True
            a__ =True
            a__ =model_class(lowercase_)
            a__ =model(self._prepare_for_class(lowercase_ , lowercase_))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_))
            self.assertEqual(model.config.output_hidden_states , lowercase_)
            check_encoder_attentions_output(lowercase_)
@require_tf
class lowercase_ (unittest.TestCase ):
    """Slow integration test: compares a 3x3 slice of TFConvBertModel's output
    against reference values for the YituTech/conv-bert-base checkpoint.

    NOTE(review): local names are mangled (all assignments target ``a__``), so
    the references to ``model``, ``output`` and ``lowercase_`` below do not
    resolve as written; the float literals also contain mangled digit grouping
    (e.g. ``-0.03_47_54_93``) — confirm against upstream before relying on them.
    """

    @slow
    def __UpperCamelCase ( self) -> Optional[int]:
        a__ =TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        # A single 6-token dummy sequence.
        a__ =tf.constant([[0, 1, 2, 3, 4, 5]])
        a__ =model(lowercase_)[0]
        a__ =[1, 6, 768]
        self.assertEqual(output.shape , lowercase_)
        # Reference activations for the first 3 positions / hidden dims.
        a__ =tf.constant(
            [
                [
                    [-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
                    [0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
                    [0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-4)
| 20 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__A = """sshleifer/bart-tiny-random"""
__A = """patrickvonplaten/t5-tiny-random"""
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``create_student_by_copying_alternating_layers``.

    NOTE(review): every method is named ``snake_case`` (later definitions
    shadow earlier ones) and ``__UpperCAmelCase`` is referenced but never
    defined — upstream these calls pass the TINY_BART / TINY_T5 checkpoint
    constants defined at module level. Restore before running.
    """

    @cached_property
    def snake_case ( self ):
        """Teacher config fixture (checkpoint reference mangled, see class note)."""
        return AutoConfig.from_pretrained(__UpperCAmelCase )

    def snake_case ( self ):
        """A 1-encoder/1-decoder student should report one hidden layer."""
        lowerCAmelCase__ , *lowerCAmelCase__ :List[str] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def snake_case ( self ):
        """Student creation with default decoder depth should not raise."""
        lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )

    def snake_case ( self ):
        """Shrinking only the encoder keeps the teacher's decoder depth."""
        lowerCAmelCase__ , *lowerCAmelCase__ :List[Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def snake_case ( self ):
        """Explicit e=1, d=1 shrinks both sides to one layer."""
        lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def snake_case ( self ):
        """Passing no layer counts at all must raise."""
        with self.assertRaises(__UpperCAmelCase ):
            create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=__UpperCAmelCase , d=__UpperCAmelCase )
| 93 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def lowerCAmelCase_ (train_dt, train_usr, train_mtch, test_dt, test_mtch):
    """Ordinary-least-squares forecast of the next user count.

    Fits beta = (X^T X)^-1 X^T y on the design matrix [1, date, match_count]
    and returns |beta0 + test_dt[0]*beta1 + test_mtch[0] + beta2|.
    (The original declared all five parameters with the same mangled name,
    which is a SyntaxError; names are restored here.)

    :param train_dt:   training dates (feature 1)
    :param train_usr:  training user counts (target)
    :param train_mtch: training match counts (feature 2)
    :param test_dt:    test date, first element used
    :param test_mtch:  test match count, first element used
    :return: absolute predicted value (float)
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # Normal-equation solve; requires X^T X to be invertible.
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def lowerCAmelCase_ (train_user, train_match, test_match):
    """Forecast the next user count with a seasonal ARIMA model.

    Fits SARIMAX(order=(1,2,1), seasonal_order=(1,1,0,7)) on ``train_user``
    with ``train_match`` as an exogenous regressor, then predicts using the
    held-out ``test_match`` value.  (The original declared all parameters with
    the same mangled name — a SyntaxError; names are restored here.)

    :param train_user:  training target series
    :param train_match: exogenous regressor aligned with train_user
    :param test_match:  exogenous value for the forecast step
    :return: first predicted value (float)
    """
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    # Nelder-Mead fit, silenced convergence output.
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(train_user), exog=[test_match])
    return result[0]
def lowerCAmelCase_ (x_train, x_test, train_user):
    """Forecast with an RBF support-vector regressor.

    Fits SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1) on (x_train,
    train_user) and returns the prediction for the first test row.  (The
    original declared all parameters with the same mangled name — a
    SyntaxError; names are restored here.)

    :param x_train:    training feature rows
    :param x_test:     test feature rows (first prediction returned)
    :param train_user: training target values
    :return: predicted value for x_test[0] (float)
    """
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def lowerCAmelCase_ (train_user):
    """Return a lower safety limit derived from the interquartile range.

    Sorts ``train_user`` in place, then computes q1 - 0.1 * (q3 - q1) where
    q1/q3 are the 25th/75th percentiles.  (The original's local names were
    mangled so that ``iqr`` was computed as ``qa - qa``; restored here.)

    :param train_user: list of numeric observations (sorted in place)
    :return: lower limit (float)
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def lowerCAmelCase_ (list_vote, actual_result):
    """Majority vote on whether today's observation is safe.

    A vote counts as "safe" when it does not exceed ``actual_result`` and its
    magnitude is within 0.1 of it; every other vote counts as "not safe".
    (The original declared both parameters with the same mangled name — a
    SyntaxError; names are restored here.)

    :param list_vote:     forecasts from the individual models
    :param actual_result: observed value to compare against
    :return: True when safe votes strictly outnumber unsafe ones
    """
    safe = 0
    not_safe = 0
    for vote in list_vote:
        if vote > actual_result:
            # Overshooting forecasts are treated as unsafe outright.
            not_safe += 1
        elif abs(abs(vote) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # Demo pipeline: normalize a tiny fixture, forecast today's user count with
    # three models, and vote on whether today's data looks safe.
    # (Local names below were mangled to `UpperCAmelCase_`, leaving references
    # such as `data_input` and `data_input_df` undefined; restored here.)
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    # Normalize each row so the features are on comparable scales.
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    # The original printed a plain string with a literal "{not_str}" — the
    # f-prefix was missing.
    print(f"Today's data is {not_str}safe.")
| 21 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# True when the installed torch predates 1.11, whose `torch.onnx.export` still
# accepted the since-removed `use_external_data_format` / `enable_onnx_checker`
# arguments.  (The original assigned this flag to `__A`, leaving the
# `is_torch_less_than_1_11` reference below undefined.)
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def __A (
    model,
    model_args,
    output_path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, dispatching on the torch version.

    Parameter names are restored from upstream (the original declared every
    parameter with the same mangled name — a SyntaxError); callers in this
    file pass them by keyword.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->List[Any]:
    """Convert a Stable Diffusion checkpoint to an ONNX pipeline on disk.

    Upstream parameters (in order): model_path, output_path, opset, fp16=False.

    NOTE(review): every parameter is declared with the same mangled name
    (`_SCREAMING_SNAKE_CASE`) and every local assignment targets
    `lowerCAmelCase__`, so the references below (`fpaa`, `pipeline`,
    `output_path`, `text_input`, `unet_path`, `vae_encoder`, ...) do not
    resolve as written — restore from the upstream diffusers script before use.
    """
    lowerCAmelCase__ :List[Any] = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        lowerCAmelCase__ :Tuple = 'cuda'
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        lowerCAmelCase__ :List[Any] = 'cpu'
    lowerCAmelCase__ :List[str] = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , torch_dtype=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :Dict = Path(_SCREAMING_SNAKE_CASE )
    # TEXT ENCODER
    lowerCAmelCase__ :str = pipeline.text_encoder.config.max_position_embeddings
    lowerCAmelCase__ :Dict = pipeline.text_encoder.config.hidden_size
    lowerCAmelCase__ :List[Any] = pipeline.tokenizer(
        'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    # Free memory between component exports.
    del pipeline.text_encoder
    # UNET
    lowerCAmelCase__ :int = pipeline.unet.config.in_channels
    lowerCAmelCase__ :Optional[Any] = pipeline.unet.config.sample_size
    lowerCAmelCase__ :Dict = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            torch.randn(2 ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            torch.randn(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=_SCREAMING_SNAKE_CASE , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        } , opset=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , )
    lowerCAmelCase__ :List[Any] = str(unet_path.absolute().as_posix() )
    lowerCAmelCase__ :int = os.path.dirname(_SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :str = onnx.load(_SCREAMING_SNAKE_CASE )
    # clean up existing tensor files
    shutil.rmtree(_SCREAMING_SNAKE_CASE )
    os.mkdir(_SCREAMING_SNAKE_CASE )
    # collate external tensor files into one
    onnx.save_model(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_as_external_data=_SCREAMING_SNAKE_CASE , all_tensors_to_one_file=_SCREAMING_SNAKE_CASE , location='weights.pb' , convert_attribute=_SCREAMING_SNAKE_CASE , )
    del pipeline.unet
    # VAE ENCODER
    lowerCAmelCase__ :int = pipeline.vae
    lowerCAmelCase__ :Optional[Any] = vae_encoder.config.in_channels
    lowerCAmelCase__ :int = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    lowerCAmelCase__ :str = lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : vae_encoder.encode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0].sample()
    onnx_export(
        _SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    # VAE DECODER
    # Both "encoder" and "decoder" locals alias pipeline.vae; only the decode
    # path is traced below.
    lowerCAmelCase__ :int = pipeline.vae
    lowerCAmelCase__ :List[Any] = vae_decoder.config.latent_channels
    lowerCAmelCase__ :Optional[int] = vae_decoder.config.out_channels
    # forward only through the decoder part
    lowerCAmelCase__ :Any = vae_encoder.decode
    onnx_export(
        _SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        lowerCAmelCase__ :Optional[int] = pipeline.safety_checker
        lowerCAmelCase__ :Optional[int] = safety_checker.config.vision_config.num_channels
        lowerCAmelCase__ :Any = safety_checker.config.vision_config.image_size
        lowerCAmelCase__ :List[str] = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
                torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            ) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            } , opset=_SCREAMING_SNAKE_CASE , )
        del pipeline.safety_checker
        lowerCAmelCase__ :Union[str, Any] = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
        lowerCAmelCase__ :Dict = pipeline.feature_extractor
    else:
        lowerCAmelCase__ :Tuple = None
        lowerCAmelCase__ :Optional[int] = None
    # Reassemble the exported parts into an ONNX pipeline and round-trip load it.
    lowerCAmelCase__ :List[str] = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(_SCREAMING_SNAKE_CASE )
    print('ONNX pipeline saved to' , _SCREAMING_SNAKE_CASE )
    del pipeline
    del onnx_pipeline
    lowerCAmelCase__ :Dict = OnnxStableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , provider='CPUExecutionProvider' )
    print('ONNX pipeline is loadable' )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # (The original assigned the parser and the parsed namespace to `__A`,
    # leaving `parser`/`args` undefined, and read the nonexistent `args.fpaa`
    # attribute; `--fp16` is stored on the namespace as `fp16`.)
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )

    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 93 | 0 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def snake_case_ (UpperCamelCase : dict ):
    """Split a scikit-learn dataset mapping into its (features, target) pair."""
    features = UpperCamelCase["data"]
    labels = UpperCamelCase["target"]
    return (features, labels)
def snake_case_ (data_x: np.ndarray, target_y: np.ndarray, test_x: np.ndarray):
    """Fit an XGBoost regressor and predict targets for the test features.

    (The original declared all three parameters with the same mangled name —
    a SyntaxError; names are restored here.)

    :param data_x:   training feature matrix
    :param target_y: training target vector
    :param test_x:   test feature matrix
    :return: predictions reshaped to an (n_samples, 1) column vector
    """
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(data_x, target_y)
    # Predict target for test data
    predictions = xgb.predict(test_x)
    # Column shape so the predictions align with sklearn's metric helpers.
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def snake_case_ ():
    """Fetch the California-housing data, train XGBoost on a 75/25 split, and
    print MAE/MSE on the held-out test set.

    (Local names were mangled to `_a` in the original, leaving every later
    reference undefined; restored here. The helper calls `data_handling` and
    `xgboost` match the upstream function names.)
    """
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = snake_case_.__globals__  # noqa: B018 (placeholder removed below)
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module before the full pipeline.
    doctest.testmod(verbose=True)
    # NOTE(review): `main` is not defined in this module as written — every
    # function above is named `snake_case_` (mangled). Confirm the intended
    # entry point against the upstream script.
    main()
| 22 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the SEW model: the configuration is always
# importable; the PyTorch models are only exposed when torch is installed.
# (The original assigned both the dict and the model list to `__A`, leaving
# `_import_structure` undefined at the bottom of the file — a NameError.)
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration.
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config archive map. The previous annotations
# (`Dict`, `Optional[Any]`) are evaluated at module import time (PEP 526) but
# `typing` is never imported here, which raised NameError; they are removed.
# NOTE(review): both statements bind the same name, so the second clobbers
# the logger — nothing in view reads either binding; verify against upstream.
snake_case__ = logging.get_logger(__name__)

snake_case__ = {
    """uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class _a ( PretrainedConfig ):
    """Configuration class for an MRA model (``model_type = "mra"``).

    Stores vocabulary/transformer sizes plus the MRA-specific approximation
    hyper-parameters. The original ``__init__`` repeated one parameter name
    for every argument (a SyntaxError) and bound each attribute to a
    throwaway local instead of ``self``; parameter names are restored from
    the attribute assignments in the body. The base class is the imported
    ``PretrainedConfig`` (the previous base name was undefined).
    """

    A_ = """mra"""

    def __init__(
        self ,
        vocab_size=50265 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=1 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-5 ,
        position_embedding_type="absolute" ,
        block_per_row=4 ,
        approx_mode="full" ,
        initial_prior_first_n_blocks=0 ,
        initial_prior_diagonal_n_blocks=0 ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        **kwargs ,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # MRA-specific attention-approximation knobs.
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 23 |
"""simple docstring"""
def __A (sentence: str , ngram_size: int ) ->list[str]:
    """Return every contiguous n-gram (substring) of ``sentence`` with length
    ``ngram_size``; empty when the sentence is shorter than the n-gram size.

    The original signature reused one name for both parameters, which is a
    SyntaxError; the intended names are taken from the body's references.
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
# Run this module's doctests when it is executed directly as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 93 | 0 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase (*args , take_from = None , standard_warn = True , stacklevel = 2 ):
    """Deprecation helper (diffusers-style ``deprecate``).

    Each positional argument is an ``(attribute, version_name, message)``
    triple. For each triple: raise ValueError if the library version already
    reached ``version_name``; otherwise pop the attribute from ``take_from``
    (dict of kwargs) or read it from ``take_from`` (object), and emit a
    FutureWarning. Returns nothing, the single collected value, or a tuple
    of them. The original signature reused one name for every parameter
    (a SyntaxError); names are restored from the body's references.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # A single triple may be passed as three positionals; normalize.
    if not isinstance(args[0] , tuple ):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''' )

        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''

        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )

    # Leftover keys in the dict were never expected at all: surface them as
    # a TypeError pointing at the caller's frame.
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )

    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 24 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for InstructBlipProcessor: save/load round-trips and joint
    text/image preprocessing against the component processors.

    Method names are restored to the unittest conventions the bodies rely on
    (``self.tmpdirname``, ``self.get_tokenizer()``, ...): previously every
    method shared a single name, so all but the last were silently lost and
    the attributes they set were never defined.
    """

    def setUp( self ):
        """Build a processor from tiny hub components and save it to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )

        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )

        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        """Reload the saved processor and return its main tokenizer."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor( self , **kwargs ):
        """Reload the saved processor and return its image processor."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def get_qformer_tokenizer( self , **kwargs ):
        """Reload the saved processor and return its Q-Former tokenizer."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        """Return a list containing one random PIL image (built channels-first)."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features( self ):
        """Extra kwargs passed to from_pretrained must override saved settings."""
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , BertTokenizerFast )

    def test_image_processor( self ):
        """Processing images through the processor matches the raw image processor."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        """Text goes through both tokenizers; Q-Former keys are prefixed."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str )

        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )

    def test_processor( self ):
        """Joint text+image call yields all expected keys; empty call raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )

        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        """batch_decode is forwarded to the main tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )

        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        """The processor output exposes exactly the expected input names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )

        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 93 | 0 |
from __future__ import annotations
from cmath import sqrt
def lowerCamelCase__ ( a , b , c ):
    """Return both roots of ``a*x^2 + b*x + c = 0``.

    Real roots are returned as floats; complex roots as complex numbers.
    Raises ValueError when ``a`` is zero. The original signature repeated
    one parameter name three times (a SyntaxError); the body already read
    ``a``, ``b`` and ``c``, so those names are restored.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def lowerCamelCase__ ( ):
    """Demo entry point: solve ``5x^2 + 6x + 1 = 0`` and print both solutions.

    The roots helper above shares this function's name and is shadowed by
    it, and the old body also used an annotated tuple-unpacking target
    (a SyntaxError) and printed undefined names — so the quadratic formula
    is applied inline here.
    """
    a, b, c = 5, 6, 1
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    solution_1 = root_1.real if not root_1.imag else root_1
    solution_2 = root_2.real if not root_2.imag else root_2
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
    # The entry point above is bound to `lowerCamelCase__`; there is no
    # `main` in this module. (This also removes the stray trailing junk
    # that had been fused onto this line.)
    lowerCamelCase__()
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Mapping from pip-style comparison operators to their Python implementations.
# NOTE(review): this binding (`__A`) is immediately shadowed by the function
# definitions below, which reuse the same name — the table is unreachable
# under this name at runtime; verify against upstream `transformers.utils.versions`.
__A = {
    """<""": operator.lt,
    """<=""": operator.le,
    """==""": operator.eq,
    """!=""": operator.ne,
    """>=""": operator.ge,
    """>""": operator.gt,
}
def __A (op , got_ver , want_ver , requirement , pkg , hint ) ->None:
    """Raise unless ``got_ver`` satisfies ``want_ver`` under comparator ``op``.

    Raises ValueError when either version is missing and ImportError when the
    comparison fails. The original signature repeated one parameter name six
    times (a SyntaxError); names are restored from the body's references. The
    comparator table is resolved locally because its module-level binding is
    shadowed by later definitions reusing the same identifier.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            F" reinstalling {pkg}." )
    comparators = {
        "<": operator.lt,
        "<=": operator.le,
        "==": operator.eq,
        "!=": operator.ne,
        ">=": operator.ge,
        ">": operator.gt,
    }
    if not comparators[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def __A (requirement , hint = None ) ->None:
    """Check that a pip-style ``requirement`` (e.g. ``tokenizers==0.9.4`` or the
    special package ``python``) is satisfied by what is installed; raise
    otherwise.

    The comparator table and version-comparison helper are inlined because
    the module-level names they originally used are shadowed by later
    definitions that reuse the same identifier. The original signature also
    repeated its parameter name (a SyntaxError) and used annotated
    tuple-unpacking targets, which Python rejects.
    """
    comparators = {
        "<": operator.lt,
        "<=": operator.le,
        "==": operator.eq,
        "!=": operator.ne,
        ">=": operator.ge,
        ">": operator.gt,
    }

    def _check(op , got_ver , want_ver ):
        # Inlined version comparison; raises ValueError/ImportError on failure.
        if got_ver is None or want_ver is None:
            raise ValueError(
                F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
                F" reinstalling {pkg}." )
        if not comparators[op](version.parse(got_ver ) , version.parse(want_ver ) ):
            raise ImportError(
                F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )

    hint = F"\n{hint}" if hint is not None else ''
    wanted = {}
    # non-versioned check
    if re.match(r'^[\w_\-\d]+$' , requirement ):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , requirement )
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                F" got {requirement}" )
        pkg, want_full = match[0]
        want_range = want_full.split(',' ) # there could be multiple requirements
        for w in want_range:
            match = re.findall(r'^([\s!=<>]{1,2})(.+)' , w )
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    F" but got {requirement}" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in comparators:
                raise ValueError(F"{requirement}: need one of {list(comparators.keys() )}, but got {op}" )

    # special case: compare against the running interpreter itself
    if pkg == "python":
        got_ver = '.'.join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _check(op , got_ver , want_ver )
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F"The '{requirement}' distribution was not found and is required by this application. {hint}" )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _check(op , got_ver , want_ver )
def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]:
    """Check a core dependency, pointing the user at the standard reinstall hint.

    NOTE(review): `require_version` is not defined in this module — the
    requirement checker above was also renamed to `__A` and is shadowed by
    this very definition, so this call raises NameError at runtime. The call
    also re-passes the requirement where the hint belongs, while the hint
    string below is bound to an unused local; presumably this should be
    `require_version(requirement, hint)` — verify against the original
    `transformers.utils.versions.require_version_core`.
    """
    lowerCAmelCase__ :Optional[Any] = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    return require_version(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 93 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def _a ( _lowerCamelCase ) -> int:
    """Decorator factory: append the single key ``_lowerCamelCase`` to the
    decorated function's ``handle_key`` list (consumed by the key-handler
    metaclass below)."""

    def decorator(func ):
        # The inner parameter previously shadowed the key and the body then
        # read an undefined name `key` (NameError); distinct names restore
        # the intended registration.
        handle = getattr(func , """handle_key""" , [] )
        handle += [_lowerCamelCase]
        setattr(func , """handle_key""" , handle )
        return func

    return decorator
def _a ( *_lowerCamelCase ) -> str:
    """Decorator factory: append every key in ``_lowerCamelCase`` to the
    decorated function's ``handle_key`` list."""

    def decorator(func ):
        # The inner parameter previously shadowed the keys tuple and the body
        # read an undefined name `keys` (NameError); fixed with distinct names.
        handle = getattr(func , """handle_key""" , [] )
        handle += list(_lowerCamelCase )
        setattr(func , """handle_key""" , handle )
        return func

    return decorator
class _A ( type ):
    """Metaclass that collects methods tagged by the decorators above (via
    their ``handle_key`` attribute) into a per-class ``key_handler`` dict and
    installs a ``handle_input`` dispatcher on the class.

    The base is ``type`` because ``_A`` is invoked as a metaclass factory
    with ``(name, bases, namespace)`` (see the register helper below); the
    previous base name was undefined. The original ``__new__`` also repeated
    one parameter name three times (a SyntaxError).
    """

    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , """key_handler""" ):
            setattr(new_cls , """key_handler""" , {} )
        setattr(new_cls , """handle_input""" , _A.handle_input )

        # Register every attribute that was tagged with `handle_key`.
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input( cls ):
        """Read one key press and dispatch to the registered handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            # Record the pressed key so handlers can inspect it — restored
            # from the upstream menu-input helper; verify the attribute name.
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def _a ( cls ) -> str:
    """Rebuild ``cls`` with the key-handler metaclass applied."""
    # `KeyHandler` does not exist in this module — the metaclass defined
    # above is named `_A` (the old reference raised NameError).
    return _A(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 26 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for the ONNX Stable-Diffusion img2img pipeline across
    schedulers.

    Restored from the body's own references: the mixin base was an undefined
    name, the checkpoint attribute is read as ``self.hub_checkpoint``, every
    method shared one name (so all but the last were lost), and locals were
    bound to throwaway names while being read under their intended ones. The
    class is also given a distinct name — the nightly class below previously
    reused the same identifier and clobbered this one.
    """

    hub_checkpoint = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""

    def get_dummy_inputs( self , seed=0 ):
        """Deterministic prompt/image/generator kwargs for a 128x128 run."""
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1

    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX img2img pipeline.

    Restored from the body's references: the properties are read as
    ``self.gpu_provider`` / ``self.gpu_options`` but were named otherwise;
    this class also previously reused the fast-test class's name, clobbering
    it, so it gets a distinct name.
    """

    @property
    def gpu_provider( self ):
        """ONNX Runtime CUDA execution-provider tuple with a 15GB arena cap."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options( self ):
        """Session options with memory-pattern optimization disabled."""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )

        prompt = 'A fantasy landscape, trending on artstation'

        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2

    def test_inference_k_lms( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )

        prompt = 'A fantasy landscape, trending on artstation'

        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 93 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class lowerCamelCase( ProcessorMixin ):
    """Processor bundling a BLIP image processor, a language tokenizer and a
    Q-Former tokenizer for InstructBLIP.

    Restored from the bodies' own references: the base class name was
    undefined (the imported ``ProcessorMixin`` is the evident base), every
    method repeated its parameter names (SyntaxErrors), five methods shared
    one name, and the ProcessorMixin bookkeeping attributes were all bound
    to a single clobbered name. The Q-Former tokenizer is saved/loaded from a
    ``qformer_tokenizer`` subfolder because ProcessorMixin only tracks the
    two attributes below.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        super().__init__(image_processor , tokenizer )

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        """Tokenize ``text`` with both tokenizers and preprocess ``images``;
        the Q-Former stream is stored under ``qformer_``-prefixed keys."""
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids' )
            encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask' )

        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )

        return encoding

    def batch_decode( self , *args , **kwargs ):
        """Forward to the language tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the language tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def save_pretrained( self , save_directory , **kwargs ):
        """Save the processor, plus the Q-Former tokenizer in a subfolder."""
        if os.path.isfile(save_directory ):
            raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Load the processor, restoring the Q-Former tokenizer from its subfolder."""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 27 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the TF tokenizer tests below. The test class reads
# these exact names (`TOKENIZER_CHECKPOINTS`, `TINY_MODEL_CHECKPOINT`);
# previously both values were bound to `__A`, with the second assignment
# clobbering the first.
TOKENIZER_CHECKPOINTS = ["""gpt2"""]
TINY_MODEL_CHECKPOINT = """gpt2"""
__A = TINY_MODEL_CHECKPOINT  # keep the old binding for any remaining readers
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :List[str] = tokenizer
lowerCAmelCase__ :Optional[int] = AutoConfig.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = TFGPTaLMHeadModel.from_config(__UpperCAmelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = self.tokenizer(__UpperCAmelCase )
lowerCAmelCase__ :int = tokenized['input_ids'].to_tensor()
lowerCAmelCase__ :Optional[Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowerCAmelCase__ :int = self.model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ :List[str] = [GPTaTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowerCAmelCase__ :Union[str, Any] = [TFGPTaTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCAmelCase__ :int = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
lowerCAmelCase__ :str = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def snake_case ( self ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowerCAmelCase__ :int = tokenizer([test_inputs] , return_tensors='tf' )
lowerCAmelCase__ :Optional[int] = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowerCAmelCase__ :Optional[int] = python_outputs[key].numpy()
lowerCAmelCase__ :List[str] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__UpperCAmelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def snake_case ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase__ :Optional[int] = tf.function(__UpperCAmelCase )
for test_inputs in self.test_sentences:
lowerCAmelCase__ :Any = tf.constant(__UpperCAmelCase )
lowerCAmelCase__ :int = compiled_tokenizer(__UpperCAmelCase )
lowerCAmelCase__ :Any = tf_tokenizer(__UpperCAmelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def snake_case ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase__ :List[str] = ModelToSave(tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase__ :List[str] = model.serving(__UpperCAmelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCAmelCase__ :Union[str, Any] = Path(__UpperCAmelCase ) / 'saved.model'
tf.saved_model.save(__UpperCAmelCase , __UpperCAmelCase , signatures={'serving_default': model.serving} )
lowerCAmelCase__ :str = tf.saved_model.load(__UpperCAmelCase )
lowerCAmelCase__ :Dict = loaded_model.signatures['serving_default'](__UpperCAmelCase )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def snake_case ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase__ :List[str] = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase__ :List[str] = tf_tokenizer(__UpperCAmelCase ) # Build model with some sample inputs
lowerCAmelCase__ :Union[str, Any] = tf_tokenizer.get_config()
lowerCAmelCase__ :Tuple = TFGPTaTokenizer.from_config(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = model_from_config(__UpperCAmelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def snake_case ( self ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowerCAmelCase__ :int = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
lowerCAmelCase__ :Tuple = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase__ :List[str] = tf_tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 93 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
self.test()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : str = self.advance()
if not self.does_advance(A ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.update(A )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super(A, self ).__init__()
if not isinstance(A, A ) or len(A ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(A, A ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
SCREAMING_SNAKE_CASE : Tuple = token_ids
SCREAMING_SNAKE_CASE : Tuple = len(self.token_ids )
SCREAMING_SNAKE_CASE : Optional[int] = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(A )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : int = False
if self.does_advance(A ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : int = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : int = True
self.reset()
return stepped, completed, reset
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Dict = 0
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : List[Any] = self.fulfilled_idx
SCREAMING_SNAKE_CASE : List[Any] = self.completed
return new_constraint
class _a :
'''simple docstring'''
def __init__( self, A, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = max([len(A ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : Optional[int] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Dict = root
for tidx, token_id in enumerate(A ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : Optional[Any] = level[token_id]
if no_subsets and self.has_subsets(A, A ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F" {nested_token_ids}." )
SCREAMING_SNAKE_CASE : Any = root
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : Optional[Any] = start[current_token]
SCREAMING_SNAKE_CASE : Optional[Any] = list(start.keys() )
return next_tokens
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.next_tokens(A )
return len(A ) == 0
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = list(root.values() )
if len(A ) == 0:
return 1
else:
return sum([self.count_leaves(A ) for nn in next_nodes] )
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.count_leaves(A )
return len(A ) != leaf_count
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super(A, self ).__init__()
if not isinstance(A, A ) or len(A ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(A, A ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(A, A ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
SCREAMING_SNAKE_CASE : Dict = DisjunctiveTrie(A )
SCREAMING_SNAKE_CASE : int = nested_token_ids
SCREAMING_SNAKE_CASE : int = self.trie.max_height
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(A )}" )
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
if self.does_advance(A ):
self.current_seq.append(A )
SCREAMING_SNAKE_CASE : Tuple = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : int = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[str] = completed
return stepped, completed, reset
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase_ ( self, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Tuple = self.seqlen
SCREAMING_SNAKE_CASE : Dict = self.current_seq
SCREAMING_SNAKE_CASE : str = self.completed
return new_constraint
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : List[str] = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : str = len(A )
SCREAMING_SNAKE_CASE : Any = False
self.init_state()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : str = [constraint.copy(stateful=A ) for constraint in self.constraints]
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : List[str] = constraint.advance()
if isinstance(A, A ):
token_list.append(A )
elif isinstance(A, A ):
token_list.extend(A )
else:
SCREAMING_SNAKE_CASE : List[Any] = self.inprogress_constraint.advance()
if isinstance(A, A ):
token_list.append(A )
elif isinstance(A, A ):
token_list.extend(A )
if len(A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.add(A )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not isinstance(A, A ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Dict = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.update(A )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A ) )
SCREAMING_SNAKE_CASE : Dict = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : Any = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(A ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pending_constraint.update(A )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(A )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Union[str, Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Any = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : Dict = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCamelCase_ ( self, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
SCREAMING_SNAKE_CASE : int = [
constraint.copy(stateful=A ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.copy(stateful=A )
SCREAMING_SNAKE_CASE : Optional[Any] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 28 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __A (_SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = 384
if "tiny" in model_name:
lowerCAmelCase__ :List[Any] = [3, 3, 9, 3]
lowerCAmelCase__ :Tuple = [96, 192, 384, 768]
if "small" in model_name:
lowerCAmelCase__ :Union[str, Any] = [3, 3, 27, 3]
lowerCAmelCase__ :Any = [96, 192, 384, 768]
if "base" in model_name:
lowerCAmelCase__ :Dict = [3, 3, 27, 3]
lowerCAmelCase__ :Any = [128, 256, 512, 1024]
lowerCAmelCase__ :Union[str, Any] = 512
if "large" in model_name:
lowerCAmelCase__ :int = [3, 3, 27, 3]
lowerCAmelCase__ :Any = [192, 384, 768, 1536]
lowerCAmelCase__ :Optional[Any] = 768
if "xlarge" in model_name:
lowerCAmelCase__ :Optional[Any] = [3, 3, 27, 3]
lowerCAmelCase__ :str = [256, 512, 1024, 2048]
lowerCAmelCase__ :Union[str, Any] = 1024
# set label information
lowerCAmelCase__ :Tuple = 150
lowerCAmelCase__ :List[Any] = 'huggingface/label-files'
lowerCAmelCase__ :Tuple = 'ade20k-id2label.json'
lowerCAmelCase__ :Tuple = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase__ :Dict = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCAmelCase__ :int = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ :List[str] = ConvNextConfig(
depths=_SCREAMING_SNAKE_CASE , hidden_sizes=_SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
lowerCAmelCase__ :Union[str, Any] = UperNetConfig(
backbone_config=_SCREAMING_SNAKE_CASE , auxiliary_in_channels=_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE , )
return config
def __A (_SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :str = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->str:
"""simple docstring"""
lowerCAmelCase__ :List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = val
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Dict = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
lowerCAmelCase__ :List[Any] = model_name_to_url[model_name]
lowerCAmelCase__ :Optional[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' )['state_dict']
lowerCAmelCase__ :List[Any] = get_upernet_config(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = UperNetForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowerCAmelCase__ :Optional[int] = state_dict.pop(_SCREAMING_SNAKE_CASE )
if "bn" in key:
lowerCAmelCase__ :Any = key.replace('bn' , 'batch_norm' )
lowerCAmelCase__ :int = val
# rename keys
lowerCAmelCase__ :Optional[Any] = create_rename_keys(_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# verify on image
lowerCAmelCase__ :str = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
lowerCAmelCase__ :Optional[Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
lowerCAmelCase__ :Tuple = SegformerImageProcessor()
lowerCAmelCase__ :List[Any] = processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
lowerCAmelCase__ :Optional[Any] = model(_SCREAMING_SNAKE_CASE )
if model_name == "upernet-convnext-tiny":
lowerCAmelCase__ :Optional[Any] = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
lowerCAmelCase__ :Union[str, Any] = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
lowerCAmelCase__ :Dict = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
lowerCAmelCase__ :List[str] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
lowerCAmelCase__ :Optional[Any] = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 93 | 0 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    # Fast (CPU-sized) tests for the DiffEdit pipeline. The mixin base classes are
    # imported above but were previously referenced through an undefined name; the
    # class attributes below were all bound to the same name, so the
    # `self.pipeline_class` / params lookups done by the mixins and the test
    # methods could never resolve. Restore the attribute names the mixins expect.
    pipeline_class = StableDiffusionDiffEditPipeline
    # DiffEdit consumes pre-computed latents instead of a raw image, and has no
    # height/width arguments, hence the parameter-set adjustments.
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def UpperCAmelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , )
lowerCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
lowerCamelCase_ = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=UpperCAmelCase , set_alpha_to_zero=UpperCAmelCase , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
lowerCamelCase_ = CLIPTextModel(UpperCAmelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=0 ):
lowerCamelCase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
lowerCamelCase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if str(UpperCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ = torch.manual_seed(UpperCAmelCase )
else:
lowerCamelCase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCamelCase_ = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=0 ):
lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('''RGB''' )
if str(UpperCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ = torch.manual_seed(UpperCAmelCase )
else:
lowerCamelCase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCamelCase_ = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=0 ):
lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(UpperCAmelCase ) ).convert('''RGB''' )
if str(UpperCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ = torch.manual_seed(UpperCAmelCase )
else:
lowerCamelCase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCamelCase_ = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def test_save_load_optional_components(self):
    """Optional components set to None must survive a save/load round-trip,
    and outputs must match before and after reloading."""
    if not hasattr(self.pipeline_class, "_optional_components"):
        return
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    # NOTE(review): mangled source read an undefined name for the device;
    # `torch_device` matches the diffusers common-test convention — confirm import.
    pipe.to(torch_device)
    pipe.set_progress_bar_config(disable=None)
    # set all optional components to None and update pipeline config accordingly
    for optional_component in pipe._optional_components:
        setattr(pipe, optional_component, None)
    pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
    inputs = self.get_dummy_inputs(torch_device)
    output = pipe(**inputs)[0]
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(tmpdir)
        pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
    for optional_component in pipe._optional_components:
        self.assertTrue(
            getattr(pipe_loaded, optional_component) is None,
            f"`{optional_component}` did not stay set to None after loading.",
        )
    inputs = self.get_dummy_inputs(torch_device)
    output_loaded = pipe_loaded(**inputs)[0]
    max_diff = np.abs(output - output_loaded).max()
    self.assertLess(max_diff, 1e-4)
def test_mask(self):
    """generate_mask should return a (1, 16, 16) all-zero mask for the dummy inputs."""
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_mask_inputs(device)
    mask = pipe.generate_mask(**inputs)
    mask_slice = mask[0, -3:, -3:]
    self.assertEqual(mask.shape, (1, 16, 16))
    expected_slice = np.array([0] * 9)
    max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
    self.assertLessEqual(max_diff, 1e-3)
    self.assertEqual(mask[0, -3, -4], 0)
def test_inversion(self):
    """pipe.invert should reproduce the reference latents slice on CPU."""
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inversion_inputs(device)
    image = pipe.invert(**inputs).images
    image_slice = image[0, -1, -3:, -3:]
    self.assertEqual(image.shape, (2, 32, 32, 3))
    expected_slice = np.array(
        [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
    )
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    self.assertLessEqual(max_diff, 1e-3)
def test_inference_batch_single_identical(self):
    """Override the common mixin test with a looser tolerance for this pipeline.

    The override must carry the parent's name (it calls
    super().test_inference_batch_single_identical) — the mangled def name broke that.
    """
    super().test_inference_batch_single_identical(expected_max_diff=5e-3)
def test_inversion_dpm(self):
    """Same as test_inversion but with the multistep DPM solver pair."""
    device = "cpu"
    components = self.get_dummy_components()
    scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
    # Swap in the DPM forward/inverse schedulers before building the pipeline.
    components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
    components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
    pipe = self.pipeline_class(**components)
    pipe.to(device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inversion_inputs(device)
    image = pipe.invert(**inputs).images
    image_slice = image[0, -1, -3:, -3:]
    self.assertEqual(image.shape, (2, 32, 32, 3))
    expected_slice = np.array(
        [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
    )
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class __lowerCamelCase(unittest.TestCase):
    """Slow GPU integration tests for the DiffEdit pipeline against SD 2.1.

    Restored from mangled source: method names (tearDown/setUpClass must carry
    those exact names to be invoked by unittest), local variable names, the lost
    `cls.raw_image` / `pipe.scheduler` assignments, and `torch.float16`
    (digit-mangled to `torch.floataa`).
    """

    def tearDown(self):
        super().tearDown()
        # Free GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        # Download and cache the reference image once for the whole class.
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 29 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__A = logging.get_logger(__name__)
__A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__A = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
__A = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class _lowerCAmelCase(PreTrainedTokenizerFast):
    """A "fast" RoBERTa tokenizer backed by the `tokenizers` library.

    Restored from mangled source: the base class was the undefined name `a`
    (this class calls PreTrainedTokenizerFast APIs, so that base is restored);
    the __init__ signature had every parameter named `__UpperCAmelCase`
    (a SyntaxError); class attributes were all bound to `__magic_name__`; and
    method names were collapsed to `snake_case`, breaking the
    `@mask_token.setter` reference and the framework hook names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Sync the backend pre-tokenizer's add_prefix_space flag with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) when it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa does not use token type ids — return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 93 | 0 |
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


# Restored names: the mangled source bound both constants to `__a`,
# so the logger was clobbered by the AUTOTUNE constant.
logger = logging.getLogger(__name__)

# Shorthand for TF's autotune sentinel, used for parallel dataset ops below.
AUTO = tf.data.AUTOTUNE
def parse_args():
    """Parse command-line arguments for the TPU masked-LM training script.

    Restored from mangled source: the parser and parsed namespace were bound to
    `UpperCAmelCase_` while the code read `parser` / returned `args`.
    """
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """Resolve, connect to, and initialize the TPU system; return the resolver.

    Raises RuntimeError when no TPU can be resolved from the given arguments.
    """
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """Sum the per-shard sample counts encoded in tfrecord file names.

    Each shard name ends in `-<shard>-<count>.tfrecord`; the second number is
    the number of samples in that shard.
    """
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build a batched, masked tf.data pipeline from a list of tfrecord shards.

    Restored from mangled source (locals were all bound to `UpperCAmelCase_`).
    NOTE(review): the shuffle call reads the module-global `args` (as in the
    upstream script) rather than the `shuffle_buffer_size` parameter — confirm.
    """
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    """Train a masked language model on TPU (or single GPU with --no_tpu).

    Restored from mangled source, including the digit-mangled attributes:
    `args.bfloataa` -> `args.bfloat16` (the attribute argparse actually creates)
    and `tf.intaa` -> `tf.int64` (the tfrecord feature dtype).
    """
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    # The config's vocab size must match the tokenizer's, not the pretrained default.
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )
    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
__a = parse_args()
main(args) | 30 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True when *number* is prime.

    Restored from mangled source: the def was named `__A` (but is called as
    `is_prime` below) and its parameter `_SCREAMING_SNAKE_CASE` was read as
    `number` in the body.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
__A = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first *n* odd composites that are NOT a prime plus twice a square.

    (Project Euler 46 / Goldbach's other conjecture.) Restored from mangled
    source: the def was named `__A`, the parameter was read as `n`, and the
    locals `list_nums` / `i` were read but never bound.
    """
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                # Representable as prime + 2*i*i, so it is not a counterexample.
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
def solution() -> int:
    """Return the smallest odd composite that cannot be written as prime + 2*i**2."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 93 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # Without sentencepiece there is no slow-tokenizer fallback.
    NllbTokenizer = None

logger = logging.get_logger(__name__)

# NOTE(review): the mangled source bound every constant below to the same name
# `lowerCamelCase__`; the tokenizer class reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# FAIRSEQ_LANGUAGE_CODES, so the intended names are restored here.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class lowerCamelCase_(PreTrainedTokenizerFast):
    """A "fast" NLLB tokenizer backed by the `tokenizers` library.

    Restored from mangled source: the base class was the undefined name
    `_SCREAMING_SNAKE_CASE` (this class calls PreTrainedTokenizerFast APIs, so
    that base is restored), and the seven class attributes below were all
    bound to `lowercase_`, clobbering one another.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__(
    self,
    vocab_file=None,
    tokenizer_file=None,
    bos_token="<s>",
    eos_token="</s>",
    sep_token="</s>",
    cls_token="<s>",
    unk_token="<unk>",
    pad_token="<pad>",
    mask_token="<mask>",
    src_lang=None,
    tgt_lang=None,
    additional_special_tokens=None,
    legacy_behaviour=False,
    **kwargs,
):
    """Build the fast NLLB tokenizer and register the FAIRSEQ language codes.

    Restored from mangled source: every parameter was named `_lowerCAmelCase`
    (a SyntaxError) and all locals/attributes were bound to
    `SCREAMING_SNAKE_CASE_`; the attribute reads elsewhere in the class
    (self.legacy_behaviour, self._src_lang, self.tgt_lang, ...) fix the names.
    """
    # Mask token behave like a normal word, i.e. include the space before it
    mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
    self.legacy_behaviour = legacy_behaviour
    super().__init__(
        vocab_file=vocab_file,
        tokenizer_file=tokenizer_file,
        bos_token=bos_token,
        eos_token=eos_token,
        sep_token=sep_token,
        cls_token=cls_token,
        unk_token=unk_token,
        pad_token=pad_token,
        mask_token=mask_token,
        src_lang=src_lang,
        tgt_lang=tgt_lang,
        additional_special_tokens=additional_special_tokens,
        legacy_behaviour=legacy_behaviour,
        **kwargs,
    )
    self.vocab_file = vocab_file
    self.can_save_slow_tokenizer = False if not self.vocab_file else True

    _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
    if additional_special_tokens is not None:
        # Only add those special tokens if they are not already there.
        _additional_special_tokens.extend(
            [t for t in additional_special_tokens if t not in _additional_special_tokens]
        )
    self.add_special_tokens({'additional_special_tokens': _additional_special_tokens})

    self.lang_code_to_id = {
        lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
    }
    self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
    self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
    self.tgt_lang = tgt_lang
    self.set_src_lang_special_tokens(self._src_lang)
@property
def src_lang(self) -> str:
    """The current source-language code (e.g. "eng_Latn")."""
    # The property must be named `src_lang`: the mangled name `lowerCAmelCase_`
    # broke the `@src_lang.setter` reference just below.
    return self._src_lang

@src_lang.setter
def src_lang(self, new_src_lang: str) -> None:
    self._src_lang = new_src_lang
    self.set_src_lang_special_tokens(self._src_lang)
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
    """Wrap the sequence(s) in the language-code prefix/suffix special tokens.

    Restored from mangled source: both parameters were named `_lowerCAmelCase`
    (a SyntaxError) and the 0/1 suffixes were digit-mangled to `a`.
    """
    if token_ids_1 is None:
        return self.prefix_tokens + token_ids_0 + self.suffix_tokens
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
    """NLLB does not use token type ids — return a zero mask of the right length."""
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] , _lowerCAmelCase : Optional[str] , **_lowerCAmelCase : Optional[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
SCREAMING_SNAKE_CASE_ = src_lang
SCREAMING_SNAKE_CASE_ = self(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tgt_lang_id
return inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str = "eng_Latn" , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : str = "fra_Latn" , **_lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = src_lang
SCREAMING_SNAKE_CASE_ = tgt_lang
return super().prepare_seqaseq_batch(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase_ ( self : Any ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_lowerCAmelCase )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE_ = [self.cur_lang_code]
SCREAMING_SNAKE_CASE_ = [self.eos_token_id]
SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = self.convert_tokens_to_ids(_lowerCAmelCase )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE_ = [self.cur_lang_code]
SCREAMING_SNAKE_CASE_ = [self.eos_token_id]
SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,) | 31 |
"""simple docstring"""
import re
def split_input(_SCREAMING_SNAKE_CASE ) ->list:
    """Split the input on punctuation separators, then whitespace-split each piece.

    Name restored from the call sites below; the body previously read an
    undefined ``str_`` instead of its parameter.
    """
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , _SCREAMING_SNAKE_CASE )]
def to_simple_case(_SCREAMING_SNAKE_CASE ) ->str:
    """Concatenate every word capitalized (PascalCase, separators dropped)."""
    string_split = split_input(_SCREAMING_SNAKE_CASE )
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text , upper , separator ) ->str:
    """Join words with `separator`, upper- or lower-cased.

    Returns the literal string "not valid string" when indexing fails.
    Parameter names restored: the mangled signature repeated one name three
    times (a SyntaxError) while the body already read `upper` and `separator`.
    """
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(_SCREAMING_SNAKE_CASE ) ->str:
    """PascalCase alias of :func:`to_simple_case`."""
    return to_simple_case(_SCREAMING_SNAKE_CASE )
def to_camel_case(_SCREAMING_SNAKE_CASE ) ->str:
    """camelCase: PascalCase with the first character lower-cased."""
    try:
        res_str = to_simple_case(_SCREAMING_SNAKE_CASE )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        # empty result has no res_str[0]
        return "not valid string"
def to_snake_case(text , upper ) ->str:
    """snake_case (or SNAKE_CASE when `upper` is truthy)."""
    return to_complex_case(text , upper , '_' )
def to_kebab_case(text , upper ) ->str:
    """kebab-case (or KEBAB-CASE when `upper` is truthy)."""
    return to_complex_case(text , upper , '-' )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 93 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
    """Configuration for the M-CLIP text tower (XLM-R encoder + linear projection).

    NOTE(review): the class name is required by the ``MCLIPConfig`` reference
    in the model class below; the base class is inferred from this module's
    imports — confirm both.
    """

    model_type = """M-CLIP"""

    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        """Store the projection dims, then delegate remaining kwargs upstream."""
        # Attribute names match the reads (`config.transformerDimensions`,
        # `config.numDims`) in the model class below.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class __UpperCamelCase ( PreTrainedModel ):
    """M-CLIP text model: XLM-R encoder, masked mean pooling, linear projection.

    NOTE(review): base class inferred from this module's imports — confirm.
    """

    config_class = MCLIPConfig

    def __init__( self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        # Both sub-modules were previously bound to a throwaway local and lost.
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def UpperCamelCase( self , input_ids , attention_mask ):
        """Return (projected pooled embedding, per-token embeddings).

        NOTE(review): presumably the ``forward`` pass under a mangled name.
        """
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        # Masked mean-pool over the sequence dimension; previously the raw
        # attention mask (not the pooled tensor) was fed to the projection.
        pooled = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(pooled ), embs
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
# NOTE(review): all four constants were mangled to the same identifier;
# restored to the names used in get_all_tweets below — fill in real values.
consumer_key = """"""
consumer_secret = """"""
access_key = """"""
access_secret = """"""
def get_all_tweets(_SCREAMING_SNAKE_CASE ) ->None:
    """Download all retrievable tweets for a user and dump them to a CSV.

    Name restored from the ``__main__`` call site below. Local names are
    restored from their later reads in the mangled body.
    """
    screen_name = _SCREAMING_SNAKE_CASE
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    if not alltweets:
        # guard: alltweets[-1] below would raise IndexError on an empty timeline
        print(F"no tweets found for {screen_name}" )
        return
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"...{len(alltweets )} tweets downloaded so far" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv; newline='' per the csv-module docs (avoids blank rows on Windows)
    with open(F"new_{screen_name}_tweets.csv" , 'w' , newline='' ) as f:
        writer = csv.writer(f )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(outtweets )
if __name__ == "__main__":
    # pass in the username of the account you want to download
    # NOTE(review): expects `get_all_tweets` at module level; the function
    # above is defined under a mangled name in this file — confirm.
    get_all_tweets("""FirePing32""")
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): both module-level names were mangled to the same identifier;
# restored to the transformers convention — confirm external references.
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __magic_name__ (snake_case_ ):
    """Configuration for a Wav2Vec2 model.

    The mangled ``__init__`` declared every parameter as ``_a`` (a SyntaxError)
    and discarded every assignment; parameter names are recovered one-to-one
    from the right-hand sides of the original body.
    NOTE(review): the base class name is mangled — presumably
    ``PretrainedConfig``; confirm.
    """

    model_type = 'wav2vec2'

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , adapter_attn_dim=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self ):
        # total downsampling factor of the conv feature extractor
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 33 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make torch ops deterministic so pixel-level assertions are reproducible.
enable_full_determinism()
class _lowerCAmelCase ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (tiny-model) tests for StableUnCLIPPipeline.

    NOTE(review): the three mixin bases were mangled to a single repeated name
    (a TypeError at class creation); restored from this module's imports —
    confirm the intended order. Class attributes and method names are restored
    to the conventions the tester mixins and pytest discovery rely on.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self ):
        """Build tiny prior + decoder components for a fast CPU run."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
        torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components

    def get_dummy_inputs(self , device , seed=0 ):
        """Deterministic prompt/generator kwargs for a 2-step run."""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass(self ):
        # only compare outputs exactly on CPU, where results are deterministic
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical(self ):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for StableUnCLIPPipeline.

    Method names restored so unittest actually invokes them; locals restored
    from their later reads; ``torch.floataa`` was a mangled ``torch.float16``.
    """

    def tearDown(self ):
        # clean up GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe('anime turle' , generator=generator , output_type='np' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 93 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch ops deterministic so pixel-level assertions are reproducible.
enable_full_determinism()
class snake_case_ ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast (tiny-model) tests for StableDiffusionInpaintPipeline (SD2 inpainting).

    NOTE(review): mixin bases were mangled to one repeated name (a TypeError);
    restored from this module's imports — confirm the order. Class attributes
    and method names are restored to the tester-mixin/pytest conventions.
    """

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )

    def get_dummy_components(self):
        """Tiny 9-channel inpainting UNet + VAE + CLIP text encoder."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self , device , seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        # np.uinta was a mangled np.uint8
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
    """Slow GPU integration tests for SD2 inpainting.

    Method names restored so unittest invokes them; locals restored from their
    later reads; ``torch.floataa`` was a mangled ``torch.float16``.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 run tolerates a much larger pixel difference
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

# Names restored: all three were mangled to `__A`, and the tuple below
# already referenced MODEL_CONFIG_CLASSES.
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer to fine-tune or train from scratch.

    Class and field names restored from the ``HfArgumentParser((ModelArguments,
    DataTrainingArguments, TrainingArguments))`` call in ``main`` and the
    metadata help strings; the mangled version collapsed every field to one
    name and used an undefined default.
    """

    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            """help""": (
                """The model checkpoint for weights initialization. Leave None if you want to train a model from"""
                """ scratch."""
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    """Arguments for the training/eval data files and masking behaviour.

    Class name restored from the ``HfArgumentParser`` call in ``main``; field
    names restored from the ``args.<name>`` reads in ``get_dataset`` below and
    the metadata help strings.
    """

    train_data_file: Optional[str] = field(
        default=None , metadata={"""help""": """The input training data file (a text file)."""} )
    train_data_files: Optional[str] = field(
        default=None , metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
    eval_ref_file: Optional[str] = field(
        default=None , metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
    line_by_line: bool = field(
        default=False , metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
    mlm: bool = field(
        default=False , metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
    whole_word_mask: bool = field(default=False , metadata={"""help""": """Whether ot not to use whole word mask."""} )
    mlm_probability: float = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    plm_probability: float = field(
        default=1 / 6 , metadata={
            """help""": (
                """Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
                """ modeling."""
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
    block_size: int = field(
        default=-1 , metadata={
            """help""": (
                """Optional input sequence length after tokenization."""
                """The training dataset will be truncated in block of this size for training."""
                """Default to the model max input length for single sentence inputs (take into account special tokens)."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __A (args , tokenizer , evaluate = False , cache_dir = None , ):
    """Build the dataset described by `args`: line-by-line, ref-file WWM, or block text.

    Parameter names restored (the mangled signature repeated one name four
    times, a SyntaxError); the glob branch previously passed the wrong
    variable into ``_dataset``.
    """

    def _dataset(file_path , ref_path=None ):
        # one concrete file -> a Dataset instance
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def __A () ->List[Any]:
    # Training entry point: parse the three argument dataclasses, set up
    # logging/seed, build config/tokenizer/model, datasets and data collator,
    # then run Trainer training and evaluation.  Returns the eval-results dict.
    # NOTE(review): throughout this function results are bound to the single
    # name `lowerCAmelCase__`, yet later lines read `parser`, `model_args`,
    # `data_args`, `training_args`, `config`, `tokenizer`, `model`, `trainer`,
    # `eval_output`, `perplexity`, `result`, `results`, and call sites pass
    # `_SCREAMING_SNAKE_CASE`, which is never bound here -- the identifiers
    # look machine-mangled; confirm against the original
    # run_language_modeling.py before trusting any behavior note below.
    """simple docstring"""
    lowerCAmelCase__ :Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = parser.parse_args_into_dataclasses()
    # --do_eval requires an eval file.
    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.' )
    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    # NOTE(review): `training_args.fpaa` is presumably mangled from `fp16`.
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
    # Set seed
    set_seed(training_args.seed )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        lowerCAmelCase__ :Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        lowerCAmelCase__ :List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        lowerCAmelCase__ :List[Any] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
    if model_args.tokenizer_name:
        lowerCAmelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        lowerCAmelCase__ :str = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        # A tokenizer cannot be trained from scratch by this script.
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name' )
    if model_args.model_name_or_path:
        lowerCAmelCase__ :Optional[Any] = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
    else:
        logger.info('Training new model from scratch' )
        lowerCAmelCase__ :int = AutoModelWithLMHead.from_config(_SCREAMING_SNAKE_CASE )
    model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
    # Encoder-only models must be trained with the masked-LM objective.
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).' )
    if data_args.block_size <= 0:
        lowerCAmelCase__ :Dict = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        lowerCAmelCase__ :Any = min(data_args.block_size , tokenizer.max_len )
    # Get datasets
    lowerCAmelCase__ :List[str] = (
        get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    lowerCAmelCase__ :Optional[int] = (
        get_dataset(_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , evaluate=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    # Pick the data collator that matches the training objective.
    if config.model_type == "xlnet":
        lowerCAmelCase__ :str = DataCollatorForPermutationLanguageModeling(
            tokenizer=_SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            lowerCAmelCase__ :Optional[Any] = DataCollatorForWholeWordMask(
                tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
        else:
            lowerCAmelCase__ :str = DataCollatorForLanguageModeling(
                tokenizer=_SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    lowerCAmelCase__ :Tuple = Trainer(
        model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE , )
    # Training
    if training_args.do_train:
        # Resume from a local checkpoint directory if one was supplied.
        lowerCAmelCase__ :Tuple = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=_SCREAMING_SNAKE_CASE )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    lowerCAmelCase__ :Optional[Any] = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        lowerCAmelCase__ :Any = trainer.evaluate()
        # Perplexity is exp of the mean eval loss.
        lowerCAmelCase__ :Optional[Any] = math.exp(eval_output['eval_loss'] )
        lowerCAmelCase__ :Dict = {'perplexity': perplexity}
        lowerCAmelCase__ :List[Any] = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
        if trainer.is_world_master():
            with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key in sorted(result.keys() ):
                    logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , str(result[key] ) )
                    writer.write('%s = %s\n' % (key, str(result[key] )) )
        results.update(_SCREAMING_SNAKE_CASE )
    return results
def __A (_SCREAMING_SNAKE_CASE ) ->Optional[int]:
    # Multiprocessing entry point (xla_spawn style): the index argument is
    # required by the spawner API but unused.
    # NOTE(review): calls `main`, which this file appears to define under the
    # mangled name `__A` above -- confirm before running.
    """simple docstring"""
    main()
# Script entry point.
# NOTE(review): `main` is not defined under that name in this file (the
# training entry function above is named `__A`) -- confirm.
if __name__ == "__main__":
    main()
| 93 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
# Import the heavy DL backends only when they are installed, so this module
# stays importable in TF-only or torch-only environments.
if is_tf_available():
    import tensorflow as tf
if is_torch_available():
    import torch
# Module-level logger (transformers' logging wrapper).
# NOTE(review): bound to `a_`, while the class bodies below call `logger` --
# the name looks machine-mangled; confirm.
a_ :Tuple = logging.get_logger(__name__)
class lowercase :
    """Container for one conversation: the processed user inputs, the model's
    generated responses, and the not-yet-processed user input.

    Fixes in this revision (the original was machine-mangled):
    * ``__init__`` repeated the parameter name ``_lowercase`` four times -- a
      SyntaxError.  Parameters are restored in the same positional order:
      ``text, conversation_id, past_user_inputs, generated_responses``.
    * ``uuid.uuida()`` (non-existent) is now ``uuid.uuid4()``.
    * Values were bound to a local instead of the ``self.uuid`` /
      ``self.past_user_inputs`` / ``self.generated_responses`` /
      ``self.new_user_input`` attributes every other method reads.
    * ``__eq__`` called ``isinstance(other, other)`` (a TypeError).
    * ``__repr__`` appended to an unbound ``output`` name.
    * All five behavior methods shared the name ``lowercase__`` (so only the
      last survived); they are restored to the names the pipeline code in
      this file actually calls: ``add_user_input``, ``mark_processed``,
      ``append_response``, ``iter_texts``.
    """

    def __init__( self , text = None , conversation_id = None , past_user_inputs = None , generated_responses = None ):
        if not conversation_id:
            # Every conversation gets a unique identifier.
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__( self , other ):
        # Same id means same conversation; otherwise compare contents.
        if not isinstance(other , lowercase ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input( self , text , overwrite = False ):
        """Queue ``text`` as the next user input; if an unprocessed input
        already exists, overwrite it only when ``overwrite`` is True (a
        warning is logged either way)."""
        # NOTE(review): `logger` is expected to be a module-level logger; in
        # this file it appears to be bound to `a_` -- confirm.
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text

    def mark_processed( self ):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None

    def append_response( self , response ):
        """Record a model response for the latest processed input."""
        self.generated_responses.append(response )

    def iter_texts( self ):
        """Yield ``(is_user, text)`` pairs in conversation order, ending with
        the pending user input if one exists."""
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__( self ):
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += f"""{name} >> {text} \n"""
        return output
# Multi-turn conversational pipeline: preprocess tokenizes a Conversation,
# _forward trims over-long inputs and calls model.generate, postprocess
# decodes the answer back onto the Conversation object.
# NOTE(review): this class is machine-mangled -- the decorator argument and
# base class are the bare name `_UpperCAmelCase` (upstream: PIPELINE_INIT_ARGS
# and Pipeline), method results are bound to `SCREAMING_SNAKE_CASE__` while
# later lines read `outputs`, `input_ids`, `conversation`, `max_length`, etc.,
# and several parameters share the name `_lowercase` while bodies read
# `min_length_for_response`, `generate_kwargs`, ... which are unbound here.
# Confirm every behavior note below against the original transformers
# conversational pipeline.
@add_end_docstrings(
    _UpperCAmelCase , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowercase ( _UpperCAmelCase ):
    def __init__( self : Dict , *_lowercase : List[str] , **_lowercase : List[Any] ):
        # Fall back to EOS as the pad token when none is configured.
        super().__init__(*_lowercase , **_lowercase )
        if self.tokenizer.pad_token_id is None:
            SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.eos_token

    def lowercase__ ( self : str , _lowercase : List[Any]=None , _lowercase : Tuple=None , _lowercase : Tuple=None , **_lowercase : List[Any] ):
        # _sanitize_parameters equivalent: split incoming kwargs into the
        # preprocess / forward / postprocess parameter dicts.
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
        SCREAMING_SNAKE_CASE__ : List[Any] = {}
        SCREAMING_SNAKE_CASE__ : int = {}
        if min_length_for_response is not None:
            SCREAMING_SNAKE_CASE__ : str = min_length_for_response
        if minimum_tokens is not None:
            SCREAMING_SNAKE_CASE__ : Dict = minimum_tokens
        if "max_length" in generate_kwargs:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(_lowercase )
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : List[str] , _lowercase : Union[Conversation, List[Conversation]] , _lowercase : Optional[Any]=0 , **_lowercase : Optional[int] ):
        # Run the base pipeline; unwrap single-element result lists.
        SCREAMING_SNAKE_CASE__ : Any = super().__call__(_lowercase , num_workers=_lowercase , **_lowercase )
        if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
            return outputs[0]
        return outputs

    def lowercase__ ( self : int , _lowercase : Conversation , _lowercase : str=32 ):
        # preprocess equivalent: validate the conversation and turn it into
        # a framework tensor of input ids.
        if not isinstance(_lowercase , _lowercase ):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
            SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer._build_conversation_input_ids(_lowercase )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            SCREAMING_SNAKE_CASE__ : Any = self._legacy_parse_and_tokenize(_lowercase )
        if self.framework == "pt":
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    def lowercase__ ( self : str , _lowercase : Optional[Any] , _lowercase : Tuple=10 , **_lowercase : Optional[Any] ):
        # _forward equivalent: trim inputs that leave fewer than
        # `minimum_tokens` for generation, run generate, and keep only the
        # newly generated suffix for decoder-only models.
        SCREAMING_SNAKE_CASE__ : Tuple = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        SCREAMING_SNAKE_CASE__ : Tuple = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            SCREAMING_SNAKE_CASE__ : List[Any] = max_length - minimum_tokens
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs['''attention_mask'''][:, -trim:]
        SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs.pop('''conversation''' )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = max_length
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(**_lowercase , **_lowercase )
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start fresh (skip the decoder start token).
            SCREAMING_SNAKE_CASE__ : int = 1
        else:
            SCREAMING_SNAKE_CASE__ : Union[str, Any] = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def lowercase__ ( self : Optional[Any] , _lowercase : str , _lowercase : int=True ):
        # postprocess equivalent: decode the generated ids and record the
        # answer on the conversation object.
        SCREAMING_SNAKE_CASE__ : Tuple = model_outputs['''output_ids''']
        SCREAMING_SNAKE_CASE__ : str = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
        SCREAMING_SNAKE_CASE__ : str = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(_lowercase )
        return conversation

    def lowercase__ ( self : Union[str, Any] , _lowercase : Conversation ):
        # Legacy tokenization: join each turn (EOS-terminated when an EOS id
        # exists) and clamp to the tokenizer's model_max_length.
        SCREAMING_SNAKE_CASE__ : str = self.tokenizer.eos_token_id
        SCREAMING_SNAKE_CASE__ : List[Any] = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
        if len(_lowercase ) > self.tokenizer.model_max_length:
            SCREAMING_SNAKE_CASE__ : Tuple = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
return input_ids
| 35 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
# Output container for the temporal transformer below.
# NOTE(review): the field is named `__magic_name__` while the forward pass
# constructs `TransformerTemporalModelOutput(sample=...)` -- the names look
# machine-mangled (upstream field: `sample`); confirm before use.
@dataclass
class _lowerCAmelCase ( a ):
    """simple docstring"""
    __magic_name__ :torch.FloatTensor
class _lowerCAmelCase ( a , a ):
    """simple docstring"""
    # Temporal transformer: GroupNorm -> proj_in -> BasicTransformerBlocks
    # applied across the frame axis -> proj_out, with a residual connection.
    # NOTE(review): results in both methods are bound to `lowerCAmelCase__`
    # while later lines read `num_attention_heads`, `hidden_states`,
    # `residual`, `batch_size`, `output`, ... -- the identifiers look
    # machine-mangled; confirm against diffusers' TransformerTemporalModel.
    @register_to_config
    def __init__( self , __UpperCAmelCase = 1_6 , __UpperCAmelCase = 8_8 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = "geglu" , __UpperCAmelCase = True , __UpperCAmelCase = True , ):
        '''simple docstring'''
        super().__init__()
        lowerCAmelCase__ :Dict = num_attention_heads
        lowerCAmelCase__ :Any = attention_head_dim
        # Inner width of the transformer = heads * head dim.
        lowerCAmelCase__ :Optional[int] = num_attention_heads * attention_head_dim
        lowerCAmelCase__ :Any = in_channels
        lowerCAmelCase__ :str = torch.nn.GroupNorm(num_groups=__UpperCAmelCase , num_channels=__UpperCAmelCase , eps=1E-6 , affine=__UpperCAmelCase )
        lowerCAmelCase__ :int = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )
        # 3. Define transformers blocks
        lowerCAmelCase__ :List[Any] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dropout=__UpperCAmelCase , cross_attention_dim=__UpperCAmelCase , activation_fn=__UpperCAmelCase , attention_bias=__UpperCAmelCase , double_self_attention=__UpperCAmelCase , norm_elementwise_affine=__UpperCAmelCase , )
                for d in range(__UpperCAmelCase )
            ] )
        lowerCAmelCase__ :List[Any] = nn.Linear(__UpperCAmelCase , __UpperCAmelCase )

    def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase = True , ):
        '''simple docstring'''
        # Forward: regroup the flattened (batch*frames, C, H, W) input so the
        # transformer attends across frames at each spatial position, then
        # restore the layout and add the residual.
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = hidden_states.shape
        lowerCAmelCase__ :Tuple = batch_frames // num_frames
        lowerCAmelCase__ :str = hidden_states
        lowerCAmelCase__ :Union[str, Any] = hidden_states[None, :].reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :str = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        lowerCAmelCase__ :Optional[int] = self.norm(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = self.proj_in(__UpperCAmelCase )
        # 2. Blocks
        for block in self.transformer_blocks:
            lowerCAmelCase__ :Optional[int] = block(
                __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase , cross_attention_kwargs=__UpperCAmelCase , class_labels=__UpperCAmelCase , )
        # 3. Output
        lowerCAmelCase__ :Any = self.proj_out(__UpperCAmelCase )
        lowerCAmelCase__ :Dict = (
            hidden_states[None, None, :]
            .reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        lowerCAmelCase__ :Optional[Any] = hidden_states.reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ :List[Any] = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=__UpperCAmelCase )
| 93 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowercase ( __A : Any , __A : bool = True , __A : float = math.inf , __A : float = -math.inf , __A : float = math.inf , __A : float = -math.inf , __A : bool = False , __A : float = 100 , __A : float = 0.01 , __A : float = 1 , ) -> Any:
'''simple docstring'''
snake_case : Tuple = False
snake_case : List[Any] = search_prob
snake_case : Union[str, Any] = start_temperate
snake_case : Optional[Any] = []
snake_case : Optional[int] = 0
snake_case : Any = None
while not search_end:
snake_case : Tuple = current_state.score()
if best_state is None or current_score > best_state.score():
snake_case : str = current_state
scores.append(__A )
iterations += 1
snake_case : Optional[Any] = None
snake_case : List[Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
snake_case : str = random.randint(0 , len(__A ) - 1 ) # picking a random neighbor
snake_case : Optional[Any] = neighbors.pop(__A )
snake_case : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
snake_case : List[str] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
snake_case : Dict = picked_neighbor
else:
snake_case : Tuple = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
snake_case : Union[str, Any] = picked_neighbor
snake_case : Optional[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
snake_case : Tuple = True
else:
snake_case : Tuple = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__A ) , __A )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
    # Demo section: minimize / maximize two toy functions from fixed starts.
    # NOTE(review): this section looks machine-mangled -- the nested helpers
    # repeat the parameter name `__A` (a SyntaxError) and return expressions
    # over unbound `x`/`y`; results are bound to `__lowercase` while the
    # messages read `local_min`; and `SearchProblem` / `simulated_annealing` /
    # `test_fa` are not defined under those names in this file.  Confirm
    # against TheAlgorithms' simulated_annealing.py before running.
    def lowercase ( __A : int , __A : int ) -> Union[str, Any]:
        '''simple docstring'''
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    __lowercase : Union[str, Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    __lowercase : List[Any] = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )
    # starting the problem with initial coordinates (12, 47)
    __lowercase : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    __lowercase : str = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    def lowercase ( __A : str , __A : Any ) -> Any:
        '''simple docstring'''
        return (3 * x**2) - (6 * y)

    __lowercase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    __lowercase : Optional[int] = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f'''{local_min.score()}'''
    )
    __lowercase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    __lowercase : List[str] = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f'''{local_min.score()}'''
    )
| 36 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
    """Helper that supplies DetaImageProcessor kwargs and computes the
    expected resized output dimensions for the image-processing tests below.

    Fixes in this revision (the original was machine-mangled):
    * ``__init__`` bound every value to a local name instead of the
      ``self.*`` attributes the other methods read, so the instance was
      never configured.
    * Both methods were named ``snake_case`` (the second shadowed the
      first); they are restored to the names the test class in this file
      actually calls: ``prepare_image_processor_dict`` and
      ``get_expected_values``.
    * The mutable list defaults for the normalization stats are replaced
      with ``None`` sentinels (same effective defaults).
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=None , image_std=None , do_rescale=True , rescale_factor=1 / 2_5_5 , do_pad=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default resize spec mirrors the processor's own defaults.
        self.size = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict( self ):
        """Return the kwargs dict used to instantiate the processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values( self , image_inputs , batched=False ):
        """Compute the (height, width) the processor should produce.

        Unbatched: apply the shortest-edge resize rule to the first image.
        Batched: per-image expectations, then the max height and max width
        (padding makes the batch rectangular).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                # PIL reports (width, height).
                w, h = image.size
            else:
                # Arrays/tensors are channels-first: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Any = DetaImageProcessor if is_vision_available() else None
def snake_case ( self ):
    # setUp equivalent: build the shared tester helper.
    # NOTE(review): the result is bound to a local (`lowerCAmelCase__`), not
    # to `self.image_processor_tester` which later methods read, and
    # `DetaImageProcessingTester` is not defined under that name in this
    # file -- identifiers look machine-mangled; confirm.
    '''simple docstring'''
    lowerCAmelCase__ :int = DetaImageProcessingTester(self )
@property
def snake_case ( self ):
    # Convenience accessor: kwargs dict used to build the processor under test.
    '''simple docstring'''
    return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
    # Verifies the processor exposes all expected configuration attributes.
    # NOTE(review): `hasattr` is checked against `__UpperCAmelCase`, which is
    # unbound in this scope (upstream checks the processor instance) -- confirm.
    '''simple docstring'''
    lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) )
    self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) )
    self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) )
    self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
    self.assertTrue(hasattr(__UpperCAmelCase , 'do_rescale' ) )
    self.assertTrue(hasattr(__UpperCAmelCase , 'do_pad' ) )
    self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )
def snake_case ( self ):
    # from_dict round-trip: default size and do_pad survive reconstruction.
    '''simple docstring'''
    lowerCAmelCase__ :Dict = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
    self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def snake_case ( self ):
    # Intentionally empty placeholder (kept for parity with the common
    # image-processing test mixin).
    '''simple docstring'''
    pass
def snake_case ( self ):
    # PIL-input path: single image and batch both produce NCHW pixel_values
    # whose H/W match the tester's expected-resize computation.
    # NOTE(review): throughout these methods results are bound to
    # `lowerCAmelCase__` while later lines read `image_inputs`,
    # `image_processing`, `encoded_images`, ... -- identifiers look
    # machine-mangled; confirm against the common image-processor tests.
    '''simple docstring'''
    lowerCAmelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
    for image in image_inputs:
        self.assertIsInstance(__UpperCAmelCase , Image.Image )
    # Test not batched input
    lowerCAmelCase__ :Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
    self.assertEqual(
        encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
    # Test batched
    lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
    lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ) , )
def snake_case ( self ):
    # numpy-input path: same shape expectations as the PIL path.
    '''simple docstring'''
    lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    lowerCAmelCase__ :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
    for image in image_inputs:
        self.assertIsInstance(__UpperCAmelCase , np.ndarray )
    # Test not batched input
    lowerCAmelCase__ :List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
    self.assertEqual(
        encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
    # Test batched
    lowerCAmelCase__ :Tuple = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
    lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ) , )
def snake_case ( self ):
    # torch-tensor-input path: same shape expectations as above.
    '''simple docstring'''
    lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
    for image in image_inputs:
        self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
    # Test not batched input
    lowerCAmelCase__ :Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
    lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
    self.assertEqual(
        encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
    # Test batched
    lowerCAmelCase__ :str = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
    lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ) , )
@slow
def snake_case ( self ):
    # Integration test (slow): encode a fixture image with COCO detection
    # annotations and verify pixel values plus every label field (area,
    # boxes, image_id, iscrowd, class_labels, orig_size, size) against
    # golden values.
    # NOTE(review): results are bound to `lowerCAmelCase__` while later
    # lines read `target`, `encoding`, ... and assertions compare against
    # the unbound `__UpperCAmelCase` -- identifiers look machine-mangled;
    # confirm against the original DETA image-processing test.
    '''simple docstring'''
    lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
        lowerCAmelCase__ :Dict = json.loads(f.read() )
    lowerCAmelCase__ :int = {'image_id': 3_9_7_6_9, 'annotations': target}
    # encode them
    lowerCAmelCase__ :int = DetaImageProcessor()
    lowerCAmelCase__ :List[Any] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='pt' )
    # verify pixel values
    lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
    self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
    lowerCAmelCase__ :Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
    self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
    # verify area
    lowerCAmelCase__ :Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
    # verify boxes
    lowerCAmelCase__ :Tuple = torch.Size([6, 4] )
    self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
    lowerCAmelCase__ :Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
    # verify image_id
    lowerCAmelCase__ :Any = torch.tensor([3_9_7_6_9] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
    # verify is_crowd
    lowerCAmelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
    # verify class_labels
    lowerCAmelCase__ :Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
    # verify orig_size
    lowerCAmelCase__ :str = torch.tensor([4_8_0, 6_4_0] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
    # verify size
    lowerCAmelCase__ :Any = torch.tensor([8_0_0, 1_0_6_6] )
    self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
    @slow
    def snake_case ( self ):
        '''
        Integration check: encode the COCO fixture image with *panoptic*
        annotations via DetaImageProcessor and compare every returned field
        (pixel_values, area, boxes, image_id, iscrowd, class_labels, masks,
        orig_size, size) against hard-coded expected tensors.

        NOTE(review): local names here were mangled by obfuscation — every
        assignment binds ``lowerCAmelCase__`` while later lines read ``target``,
        ``masks_path``, ``image_processing`` and ``encoding``; restore the real
        bindings before running.
        '''
        # fixture image + raw panoptic annotations from the repo test fixtures
        lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            lowerCAmelCase__ :Dict = json.loads(f.read() )
        lowerCAmelCase__ :Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        lowerCAmelCase__ :Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        lowerCAmelCase__ :Dict = DetaImageProcessor(format='coco_panoptic' )
        lowerCAmelCase__ :Optional[int] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='pt' )
        # verify pixel values
        lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) )
        # verify area
        lowerCAmelCase__ :Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) )
        # verify boxes
        lowerCAmelCase__ :int = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) )
        # verify image_id
        lowerCAmelCase__ :Optional[int] = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) )
        # verify is_crowd
        lowerCAmelCase__ :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) )
        # verify class_labels
        lowerCAmelCase__ :List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) )
        # verify masks
        lowerCAmelCase__ :Optional[int] = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCAmelCase )
        # verify orig_size
        lowerCAmelCase__ :Optional[int] = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) )
        # verify size
        lowerCAmelCase__ :Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
| 93 | 0 |
class A__ :
    """
    Minimum edit (Levenshtein) distance between two words, implemented both
    top-down (memoised recursion) and bottom-up (tabulation).

    Fixes: the original never assigned instance attributes (``self.worda``,
    ``self.wordb``, ``self.dp`` were read but every assignment bound a mangled
    local), compared ``worda[m]`` against ``worda[n]`` instead of ``wordb[n]``,
    and gave all three methods the same name so two were unreachable.
    """

    def __init__( self ):
        # the two words being compared and the DP memo/tabulation table
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp( self , m , n ):
        """Edit distance between worda[:m+1] and wordb[:n+1] (index -1 = empty)."""
        if m == -1:
            return n + 1  # insert the remaining n+1 characters
        elif n == -1:
            return m + 1  # delete the remaining m+1 characters
        elif self.dp[m][n] > -1:
            return self.dp[m][n]  # already memoised
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down( self , worda , wordb ):
        """Public entry point for the memoised top-down solver."""
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )

    def min_dist_bottom_up( self , worda , wordb ):
        """Tabulated bottom-up solver over a (m+1) x (n+1) table."""
        self.worda = worda
        self.wordb = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive driver: read two words and report the edit distance computed
    # by both DP strategies (they must agree).
    solver = A__()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    Sa = input("Enter the first string: ").strip()
    Sb = input("Enter the second string: ").strip()
    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}""")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 37 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Return the binary digits of a non-negative integer, without any prefix.

    Example: 10 -> '1010'.
    """
    decimal = int(_SCREAMING_SNAKE_CASE )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    # recurse on the quotient, then append the current remainder digit
    return binary_recursive(div ) + str(mod )


# Restore the name used by the recursive call above and by the rest of this
# file; a later definition rebinds __A, so the alias keeps this function alive.
binary_recursive = __A
def __A (_SCREAMING_SNAKE_CASE ) ->str:
    """Convert an integer (int or string, optionally signed, surrounding
    whitespace allowed) to a Python-style binary literal, e.g. '-9' -> '-0b1001'.

    Raises:
        ValueError: if the input is empty or not an integer.
    """

    def _to_binary(decimal ) ->str:
        # binary digits of a non-negative int, most significant digit first
        if decimal in (0, 1):
            return str(decimal )
        div, mod = divmod(decimal , 2 )
        return _to_binary(div ) + str(mod )

    number = str(_SCREAMING_SNAKE_CASE ).strip()
    if not number:
        raise ValueError('No input value was provided' )
    negative = '-' if number.startswith('-' ) else ''
    number = number.lstrip('-' )
    if not number.isnumeric():
        raise ValueError('Input value is not an integer' )
    return F"{negative}0b{_to_binary(int(number ) )}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 93 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class __snake_case ( ABC ):
    '''
    Abstract base class for dataset readers: stores the common constructor
    arguments and defers the actual loading to the abstract hook implemented
    by concrete subclasses.

    Fixes: the base class name and every parameter were mangled (all
    parameters shared one name — a SyntaxError — and assignments bound a
    local instead of ``self``); names restored from the body's own reads.
    '''

    def __init__( self , path_or_paths = None , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        # where to read from: a single path, list of paths, or mapping split -> paths
        self.path_or_paths = path_or_paths
        # a dict of paths already encodes its splits; otherwise default to "train"
        self.split = split if split or isinstance(path_or_paths , dict ) else """train"""
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def __UpperCamelCase ( self ):
        # subclasses implement the actual read/load
        pass
class __snake_case ( ABC ):
    '''
    Abstract base class for in-memory dataset input streams: stores the common
    constructor arguments and defers loading to the abstract hook.

    Fixes: mangled base class and duplicate (SyntaxError) parameter names;
    restored from the body's own reads (features, cache_dir, keep_in_memory,
    streaming, num_proc, kwargs).
    '''

    def __init__( self , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def __UpperCamelCase ( self ):
        # subclasses implement the actual read/load
        pass
| 38 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Public import structure for the lazy module: config/processor/tokenizer are
# always importable; the modeling objects additionally require torch.
# Fixes: the dict was bound to __A and then shadowed by the model list, the
# final _LazyModule call referenced an undefined `_import_structure`, and
# `import sys` was unused because the module was never installed in sys.modules.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy so heavy deps import on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
# surface conversion progress on the console and get a module-scoped logger
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE (config , base_model=False ):
    """Build the (old_key, new_key) pairs mapping DINO/timm ViT weight names
    onto the HuggingFace ViT layout.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        base_model: when True target a bare ``ViTModel`` (no classifier): the
            leading ``vit.`` prefix is stripped and layernorm/pooler keys are
            mapped instead of the classification head.

    Returns:
        list of (source_key, target_key) tuples.

    Fixes: the original signature repeated one parameter name (a SyntaxError)
    and the accumulator was bound to a mangled local while the appends read
    the undefined ``rename_keys``.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ('''cls_token''', '''vit.embeddings.cls_token'''),
            ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
            ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
            ('''pos_embed''', '''vit.embeddings.position_embeddings'''),
        ] )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''norm.weight''', '''vit.layernorm.weight'''),
                ('''norm.bias''', '''vit.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ] )

    return rename_keys
def __SCREAMING_SNAKE_CASE (state_dict , config , base_model=False ):
    """Split each timm-style fused qkv projection into separate HuggingFace
    query/key/value weights, writing them back into ``state_dict`` in place.

    Args:
        state_dict: checkpoint mapping, mutated in place.
        config: model config; ``num_hidden_layers`` and ``hidden_size`` are read.
        base_model: when True the target model has no leading ``vit.`` prefix.

    Fixes: the original signature repeated a parameter name (a SyntaxError)
    and every split slice was bound to a discarded local instead of being
    written back under the HF key names.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def __SCREAMING_SNAKE_CASE (state_dict ):
    """Drop the classification-head weights, which the bare ViT model has no
    parameters for. Mutates ``state_dict`` in place; missing keys are ignored.

    Fixes: the key list was bound to a discarded local while the loop read the
    undefined ``ignore_keys``, and ``pop`` was called with mangled arguments.
    """
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        # pop with a default so already-absent keys do not raise
        state_dict.pop(k , None )
def __SCREAMING_SNAKE_CASE (dct , old , new ):
    """Rename one entry of ``dct`` in place: move ``dct[old]`` to ``dct[new]``.

    Fixes: the original signature repeated one parameter name (a SyntaxError)
    and the popped value was bound to a discarded local.
    """
    val = dct.pop(old )
    dct[new] = val
def __SCREAMING_SNAKE_CASE ():
    """Download and return the standard COCO cats image used for output
    sanity checks (requires network access).

    Fixes: both locals were bound to a mangled name while later code read
    ``url``/``im``, and the ``stream`` keyword argument had been mangled away.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # stream=True hands PIL the raw response so the body is not pre-buffered
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ):
    """Convert a facebookresearch/dino ViT checkpoint (fetched from torch hub)
    to the HuggingFace ViT format, verify the outputs match the original
    model, and save the converted model plus its image processor.

    NOTE(review): this block was mangled by obfuscation — the signature
    repeats one parameter name (a SyntaxError; presumably model_name,
    pytorch_dump_folder_path, base_model) and every local is bound to
    ``snake_case_`` while later lines read ``config``, ``idalabel``,
    ``original_model``, ``state_dict``, ``model``, ``image_processor``,
    ``encoding``, ``pixel_values``, ``outputs`` etc. Restore the real names
    before running.
    """
    snake_case_ = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        snake_case_ = 8
    # set labels if required
    if not base_model:
        snake_case_ = 1000
        snake_case_ = '''huggingface/label-files'''
        snake_case_ = '''imagenet-1k-id2label.json'''
        # download the ImageNet-1k id -> label mapping from the hub
        snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
        snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
        snake_case_ = idalabel
        snake_case_ = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        # the "small" DINO variants use a narrower hidden size and fewer heads
        snake_case_ = 384
        snake_case_ = 1536
        snake_case_ = 12
        snake_case_ = 6
    # load original model from torch hub
    snake_case_ = torch.hub.load('''facebookresearch/dino:main''' , SCREAMING_SNAKE_CASE__ )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    snake_case_ = original_model.state_dict()
    if base_model:
        remove_classification_head_(SCREAMING_SNAKE_CASE__ )
    snake_case_ = create_rename_keys(SCREAMING_SNAKE_CASE__ , base_model=SCREAMING_SNAKE_CASE__ )
    for src, dest in rename_keys:
        rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # load HuggingFace model
    if base_model:
        snake_case_ = ViTModel(SCREAMING_SNAKE_CASE__ , add_pooling_layer=SCREAMING_SNAKE_CASE__ ).eval()
    else:
        snake_case_ = ViTForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
    model.load_state_dict(SCREAMING_SNAKE_CASE__ )
    # Check outputs on an image, prepared by ViTImageProcessor
    snake_case_ = ViTImageProcessor()
    snake_case_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
    snake_case_ = encoding['''pixel_values''']
    snake_case_ = model(SCREAMING_SNAKE_CASE__ )
    if base_model:
        snake_case_ = original_model(SCREAMING_SNAKE_CASE__ )
        assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        snake_case_ = original_model(SCREAMING_SNAKE_CASE__ )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.logits , atol=1E-3 )
    Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(SCREAMING_SNAKE_CASE__ )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    # CLI entry point for the DINO -> HuggingFace conversion.
    # Fixes: the parser was bound to a mangled name while add_argument read the
    # undefined ``parser``, and the parsed namespace never reached ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''dino_vitb16''',
        type=str,
        help='''Name of the model trained with DINO you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--base_model''',
        action='''store_true''',
        help='''Whether to only convert the base model (no projection head weights).''',
    )
    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__A = logging.get_logger(__name__)
# NOTE(review): the assignment below rebinds __A and discards the logger —
# obfuscation collapsed two distinct module globals (originally a module
# logger and the pretrained-config archive map) onto one name; verify before
# relying on the logger anywhere below.
# Mapping: checkpoint repo id -> URL of its config.json on the Hub.
__A = {
    """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
    ),
    """microsoft/deberta-v2-xxlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
    ),
}
class _lowerCAmelCase ( a ):
    """Configuration for DeBERTa-v2; defaults match microsoft/deberta-v2-xlarge.

    Fixes: the original ``__init__`` repeated one mangled parameter name many
    times (a SyntaxError) and every attribute assignment bound a discarded
    local instead of ``self``. The real parameter names/order are recovered
    from the body's own reads and from the defaults in the mangled signature.
    """

    model_type = """deberta-v2"""

    def __init__( self , vocab_size=1_2_8_1_0_0 , hidden_size=1_5_3_6 , num_hidden_layers=2_4 , num_attention_heads=2_4 , intermediate_size=6_1_4_4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1E-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept a "|"-separated string for pos_att_type
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|' )]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # pooler size defaults to the hidden size unless overridden via kwargs
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size' , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class _lowerCAmelCase ( a ):
    """ONNX export configuration for DeBERTa-v2.

    Fixes: all three members shared one mangled name (so only the last was
    reachable), the axis dict was bound to a discarded local while the returns
    read ``dynamic_axis``, and ``generate_dummy_inputs`` repeated one mangled
    parameter name (a SyntaxError). Member names follow the ``OnnxConfig``
    contract (``inputs`` / ``default_onnx_opset`` / ``generate_dummy_inputs``).
    """

    @property
    def inputs( self ):
        '''Dynamic-axis description of the model inputs for the ONNX exporter.'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )

    @property
    def default_onnx_opset( self ):
        '''Minimum ONNX opset this model exports cleanly with.'''
        return 1_2

    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 4_0 , image_height = 4_0 , tokenizer = None , ):
        '''Build dummy inputs via the parent implementation, dropping
        token_type_ids when the model has no token-type embeddings.'''
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 93 | 0 |
from math import isclose, sqrt
def UpperCamelCase ( point_x: float , point_y: float , incoming_gradient: float ) -> tuple[float, float, float]:
    """One bounce of a laser beam inside the ellipse 4x^2 + y^2 = 100
    (Project Euler problem 144).

    Given the current contact point and the incoming beam's gradient, return
    (next_x, next_y, outgoing_gradient) for the reflected beam.

    Fixes: every local was bound to a mangled name while later lines read
    ``normal_gradient``, ``outgoing_gradient``, ``linear_term`` etc.
    """
    # gradient of the normal to the ellipse at (point_x, point_y)
    normal_gradient = point_y / 4 / point_x
    # sin/cos of twice the normal's angle, used to reflect the incoming gradient
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point: keep the other one
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


# restore the name used by the solver below
next_point = UpperCamelCase


def UpperCamelCase ( first_x_coord: float = 1.4 , first_y_coord: float = -9.6 ) -> int:
    """Count the beam's reflections inside the white cell (Project Euler 144).

    The beam enters at (0.0, 10.1) heading towards (first_x_coord,
    first_y_coord) and bounces until it exits through the opening
    -0.01 <= x <= 0.01 at the top of the ellipse.
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1

    return num_reflections


# restore the name used by the __main__ guard
solution = UpperCamelCase

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 40 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Public import structure for the lazy module: configs and processor are always
# importable; the modeling objects additionally require torch.
# Fixes: the dict was bound to __A and then shadowed by the model list, the
# final _LazyModule call referenced an undefined `_import_structure`, and
# `import sys` was unused because the module was never installed in sys.modules.
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    # replace this module with a lazy proxy so heavy deps import on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    """Load a TensorFlow Funnel checkpoint into a PyTorch model and save its
    state dict.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        config_file: JSON config file describing the model architecture.
        pytorch_dump_path: destination for ``torch.save``d weights.
        base_model: when True build the decoder-less ``FunnelBaseModel``.

    Fixes: the original signature repeated one mangled parameter name four
    times (a SyntaxError) and the config/model locals were mangled away.
    """
    config = FunnelConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )


# restore the public name used by the CLI entry point below
convert_tf_checkpoint_to_pytorch = _A
if __name__ == "__main__":
    # CLI entry point for the TF -> PyTorch Funnel conversion.
    # Fixes: the parser and parsed namespace were bound to a mangled name
    # while later lines read the undefined ``parser``/``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 41 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Public import structure for the lazy module: the tokenizer is only exposed
# when sentencepiece is installed.
# Fixes: the dict was bound to __A and then shadowed by the tokenizer list,
# the final _LazyModule call referenced an undefined `_import_structure`, and
# `import sys` was unused because the module was never installed in sys.modules.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    # replace this module with a lazy proxy so the tokenizer imports on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 0 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase ( UpperCAmelCase__ ):
    '''
    Donut-style processor wrapping an image processor and a tokenizer behind a
    single ``__call__``, plus helpers to decode tokenizer output and to parse
    a generated token sequence back into a (nested) JSON-like structure.

    NOTE(review): this class was mangled by obfuscation — all three class
    attributes share one name (presumably attributes, image_processor_class,
    tokenizer_class), the base class name is undefined (presumably
    ProcessorMixin), and method locals are bound to ``lowerCamelCase_`` while
    later lines read ``kwargs``, ``image_processor``, ``tokenizer``,
    ``inputs``, ``encodings``, ``tokens`` etc. Restore before running.
    '''
    SCREAMING_SNAKE_CASE_ = ['image_processor', 'tokenizer']
    SCREAMING_SNAKE_CASE_ = 'AutoImageProcessor'
    SCREAMING_SNAKE_CASE_ = 'AutoTokenizer'

    def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
        '''Store the image processor and tokenizer; accept the deprecated
        ``feature_extractor`` kwarg as a fallback for the image processor.'''
        lowerCamelCase_ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , SCREAMING_SNAKE_CASE_ , )
            lowerCamelCase_ = kwargs.pop('feature_extractor' )
        lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = self.image_processor
        lowerCamelCase_ = False

    def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Dict:
        '''Route images to the image processor and text to the tokenizer; when
        both are given, attach the text input_ids as labels on the image
        features.'''
        # inside the (deprecated) target context manager, everything goes to
        # the current processor unchanged
        if self._in_target_context_manager:
            return self.current_processor(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = kwargs.pop('images' , SCREAMING_SNAKE_CASE_ )
        lowerCamelCase_ = kwargs.pop('text' , SCREAMING_SNAKE_CASE_ )
        if len(SCREAMING_SNAKE_CASE_ ) > 0:
            # positional form: first arg is images, the rest are forwarded
            lowerCamelCase_ = args[0]
            lowerCamelCase_ = args[1:]
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            lowerCamelCase_ = self.image_processor(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        if text is not None:
            lowerCamelCase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            lowerCamelCase_ = encodings['input_ids']
            return inputs

    def UpperCamelCase( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> str:
        '''Forward to the tokenizer's ``batch_decode``.'''
        return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCamelCase( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
        '''Forward to the tokenizer's ``decode``.'''
        return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    @contextmanager
    def UpperCamelCase( self ) -> Optional[int]:
        '''Deprecated context manager that temporarily makes the tokenizer the
        current processor (for preparing labels); restores the image processor
        on exit.'''
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.' )
        lowerCamelCase_ = True
        lowerCamelCase_ = self.tokenizer
        yield
        lowerCamelCase_ = self.image_processor
        lowerCamelCase_ = False

    def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None ) -> Tuple:
        '''Parse a generated ``<s_key>...</s_key>`` token sequence into a
        nested dict/list structure (Donut-style token-to-JSON conversion).
        Leaf values separated by ``<sep/>`` become lists; categorical special
        tokens ``<X/>`` are unwrapped to ``X``.'''
        if added_vocab is None:
            lowerCamelCase_ = self.tokenizer.get_added_vocab()
        lowerCamelCase_ = {}
        while tokens:
            # find the next opening tag <s_key>
            lowerCamelCase_ = re.search(r'<s_(.*?)>' , SCREAMING_SNAKE_CASE_ , re.IGNORECASE )
            if start_token is None:
                break
            lowerCamelCase_ = start_token.group(1 )
            lowerCamelCase_ = re.search(rf'''</s_{key}>''' , SCREAMING_SNAKE_CASE_ , re.IGNORECASE )
            lowerCamelCase_ = start_token.group()
            if end_token is None:
                # unmatched opening tag: drop it and continue
                lowerCamelCase_ = tokens.replace(SCREAMING_SNAKE_CASE_ , '' )
            else:
                lowerCamelCase_ = end_token.group()
                lowerCamelCase_ = re.escape(SCREAMING_SNAKE_CASE_ )
                lowerCamelCase_ = re.escape(SCREAMING_SNAKE_CASE_ )
                lowerCamelCase_ = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , SCREAMING_SNAKE_CASE_ , re.IGNORECASE )
                if content is not None:
                    lowerCamelCase_ = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content: # non-leaf node
                        # recurse into the nested structure
                        lowerCamelCase_ = self.tokenajson(SCREAMING_SNAKE_CASE_ , is_inner_value=SCREAMING_SNAKE_CASE_ , added_vocab=SCREAMING_SNAKE_CASE_ )
                        if value:
                            if len(SCREAMING_SNAKE_CASE_ ) == 1:
                                lowerCamelCase_ = value[0]
                            lowerCamelCase_ = value
                    else: # leaf nodes
                        lowerCamelCase_ = []
                        for leaf in content.split(r'<sep/>' ):
                            lowerCamelCase_ = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                lowerCamelCase_ = leaf[1:-2] # for categorical special tokens
                            output[key].append(SCREAMING_SNAKE_CASE_ )
                        if len(output[key] ) == 1:
                            lowerCamelCase_ = output[key][0]
            # advance past the consumed region
            lowerCamelCase_ = tokens[tokens.find(SCREAMING_SNAKE_CASE_ ) + len(SCREAMING_SNAKE_CASE_ ) :].strip()
            if tokens[:6] == r"<sep/>": # non-leaf nodes
                return [output] + self.tokenajson(tokens[6:] , is_inner_value=SCREAMING_SNAKE_CASE_ , added_vocab=SCREAMING_SNAKE_CASE_ )
        if len(SCREAMING_SNAKE_CASE_ ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def UpperCamelCase( self ) -> Dict:
        '''Deprecated alias for ``image_processor_class``.'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE_ , )
        return self.image_processor_class

    @property
    def UpperCamelCase( self ) -> Tuple:
        '''Deprecated alias for ``image_processor``.'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE_ , )
        return self.image_processor
| 42 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__A = """sshleifer/bart-tiny-random"""
__A = """patrickvonplaten/t5-tiny-random"""
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``create_student_by_copying_alternating_layers``: students
    built from tiny random teacher checkpoints should end up with the
    requested number of encoder/decoder layers.

    NOTE(review): identifiers in this class were mangled — results are bound
    to ``lowerCAmelCase__`` while assertions read ``student``, all test
    methods share one name, and the checkpoint argument became the undefined
    ``__UpperCAmelCase`` (presumably one of the tiny checkpoints above).
    Restore before running.
    """

    @cached_property
    def snake_case ( self ):
        '''Teacher config fixture, loaded once per test class.'''
        return AutoConfig.from_pretrained(__UpperCAmelCase )

    def snake_case ( self ):
        '''Student reduced to a single hidden layer.'''
        lowerCAmelCase__ , *lowerCAmelCase__ :List[str] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def snake_case ( self ):
        '''Student with one encoder layer; decoder depth left unspecified.'''
        lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )

    def snake_case ( self ):
        '''Unspecified decoder depth defaults to the teacher's layer count.'''
        lowerCAmelCase__ , *lowerCAmelCase__ :List[Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def snake_case ( self ):
        '''Both encoder and decoder reduced to one layer.'''
        lowerCAmelCase__ , *lowerCAmelCase__ :Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def snake_case ( self ):
        '''Building a student with neither depth specified must raise.'''
        with self.assertRaises(__UpperCAmelCase ):
            create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=__UpperCAmelCase , d=__UpperCAmelCase )
| 93 | 0 |
from math import ceil
def _a ( SCREAMING_SNAKE_CASE = 10_01 ):
    """Project Euler 28: sum of the numbers on both diagonals of an n x n
    clockwise number spiral (n odd, default 1001).

    Fix: the body read an undefined name ``n``; it now uses the
    ``SCREAMING_SNAKE_CASE`` parameter (the spiral side length).
    """
    n = SCREAMING_SNAKE_CASE
    total = 1  # the centre cell
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        # Ring i has corners (2i+1)^2, (2i+1)^2-2i, (2i+1)^2-4i, (2i+1)^2-6i;
        # their sum is 4*(2i+1)**2 - 12*i == 4*odd**2 - 6*even.
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    # Fixes: the solver defined above is `_a` (the call referenced an
    # undefined `solution`), and the parsed argument was bound to a throwaway
    # name while the call read an undefined `n`.
    if len(sys.argv) == 1:
        # No argument: use the default 1001 x 1001 spiral.
        print(_a())
    else:
        try:
            side = int(sys.argv[1])
            print(_a(side))
        except ValueError:
            print('Invalid entry - please enter a number')
| 43 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# NOTE(review): this torch-version flag is bound to `__A`, but the export
# helper below branches on `is_torch_less_than_1_11`, which is never defined —
# confirm the intended constant name.
__A = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
# NOTE(review): all eight parameters share the mangled name
# `_SCREAMING_SNAKE_CASE`, which is a SyntaxError in Python, and the branch
# below reads the undefined `is_torch_less_than_1_11`. This reads like the
# usual onnx_export(model, model_args, output_path, ordered_input_names,
# output_names, dynamic_axes, opset, use_external_data_format=False) helper —
# confirm the intended signature before running.
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , ) ->Tuple:
    """Export a torch module to ONNX at the given output path, creating the
    parent directory first and selecting export kwargs by torch version."""
    output_path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , enable_onnx_checker=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
    else:
        export(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) ->List[Any]:
    """Convert a Stable Diffusion checkpoint into an ONNX pipeline: exports
    text encoder, UNet, VAE encoder/decoder and safety checker, then saves and
    reload-checks an ``OnnxStableDiffusionPipeline``.

    NOTE(review): the identifier mangling broke this function — the four
    parameters share one name (a SyntaxError), every local is assigned to
    ``lowerCAmelCase__`` while later lines read descriptive names that are
    never bound (``fpaa``, ``pipeline``, ``output_path``, ``text_input``,
    ``unet_path``, ``vae_encoder``, ``vae_decoder``, ``safety_checker``,
    ``onnx_pipeline``, ...), and ``torch.floataa`` is not a real dtype
    (presumably float16/float32). Code is kept byte-identical; restore the
    original names before attempting to run it.
    """
    lowerCAmelCase__ :List[Any] = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        lowerCAmelCase__ :Tuple = 'cuda'
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        lowerCAmelCase__ :List[Any] = 'cpu'
    lowerCAmelCase__ :List[str] = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , torch_dtype=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :Dict = Path(_SCREAMING_SNAKE_CASE )
    # TEXT ENCODER
    lowerCAmelCase__ :str = pipeline.text_encoder.config.max_position_embeddings
    lowerCAmelCase__ :Dict = pipeline.text_encoder.config.hidden_size
    lowerCAmelCase__ :List[Any] = pipeline.tokenizer(
        'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_SCREAMING_SNAKE_CASE , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    del pipeline.text_encoder
    # UNET
    lowerCAmelCase__ :int = pipeline.unet.config.in_channels
    lowerCAmelCase__ :Optional[Any] = pipeline.unet.config.sample_size
    lowerCAmelCase__ :Dict = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            torch.randn(2 ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            torch.randn(2 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=_SCREAMING_SNAKE_CASE , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        } , opset=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , )
    lowerCAmelCase__ :List[Any] = str(unet_path.absolute().as_posix() )
    lowerCAmelCase__ :int = os.path.dirname(_SCREAMING_SNAKE_CASE )
    lowerCAmelCase__ :str = onnx.load(_SCREAMING_SNAKE_CASE )
    # clean up existing tensor files
    shutil.rmtree(_SCREAMING_SNAKE_CASE )
    os.mkdir(_SCREAMING_SNAKE_CASE )
    # collate external tensor files into one
    onnx.save_model(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_as_external_data=_SCREAMING_SNAKE_CASE , all_tensors_to_one_file=_SCREAMING_SNAKE_CASE , location='weights.pb' , convert_attribute=_SCREAMING_SNAKE_CASE , )
    del pipeline.unet
    # VAE ENCODER
    lowerCAmelCase__ :int = pipeline.vae
    lowerCAmelCase__ :Optional[Any] = vae_encoder.config.in_channels
    lowerCAmelCase__ :int = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    lowerCAmelCase__ :str = lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : vae_encoder.encode(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0].sample()
    onnx_export(
        _SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    # VAE DECODER
    lowerCAmelCase__ :int = pipeline.vae
    lowerCAmelCase__ :List[Any] = vae_decoder.config.latent_channels
    lowerCAmelCase__ :Optional[int] = vae_decoder.config.out_channels
    # forward only through the decoder part
    # NOTE(review): this reads `vae_encoder.decode` — presumably the decoder's
    # own `.decode` was intended; confirm against the original script.
    lowerCAmelCase__ :Any = vae_encoder.decode
    onnx_export(
        _SCREAMING_SNAKE_CASE , model_args=(
            torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=_SCREAMING_SNAKE_CASE , )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        lowerCAmelCase__ :Optional[int] = pipeline.safety_checker
        lowerCAmelCase__ :Optional[int] = safety_checker.config.vision_config.num_channels
        lowerCAmelCase__ :Any = safety_checker.config.vision_config.image_size
        lowerCAmelCase__ :List[str] = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
                torch.randn(1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
            ) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            } , opset=_SCREAMING_SNAKE_CASE , )
        del pipeline.safety_checker
        lowerCAmelCase__ :Union[str, Any] = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
        lowerCAmelCase__ :Dict = pipeline.feature_extractor
    else:
        lowerCAmelCase__ :Tuple = None
        lowerCAmelCase__ :Optional[int] = None
    lowerCAmelCase__ :List[str] = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(_SCREAMING_SNAKE_CASE )
    print('ONNX pipeline saved to' , _SCREAMING_SNAKE_CASE )
    del pipeline
    del onnx_pipeline
    # Reload the exported pipeline on CPU as a smoke test.
    lowerCAmelCase__ :Dict = OnnxStableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , provider='CPUExecutionProvider' )
    print('ONNX pipeline is loadable' )
if __name__ == "__main__":
    # Fixes: the parser and parsed-args objects were bound to `__A` while the
    # following lines read undefined names (`parser`, `args`); `--fp16` parses
    # to `args.fp16` (not `args.fpaa`); and the conversion entry point defined
    # above in this file is named `__A`, not `convert_models`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_path""",
        type=str,
        required=True,
        help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
    )
    parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
    parser.add_argument(
        """--opset""",
        default=14,
        type=int,
        help="""The version of the ONNX operator set to use.""",
    )
    parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    __A(args.model_path, args.output_path, args.opset, args.fp16)
| 93 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A ):
    """Test suite for the PNDM scheduler.

    NOTE(review): the identifier mangling renamed every method to
    ``lowerCamelCase_`` (each definition shadows the previous one) while the
    bodies call descriptive helpers (``self.get_scheduler_config``,
    ``self.check_over_configs``, ``self.full_loop``, ...) and inherit state
    (``scheduler_classes``, ``dummy_sample``, ``forward_default_kwargs``) from
    the base class ``A`` — confirm the intended method names before running.
    Code below is kept byte-identical; only comments/docstrings were added.
    """
    lowerCAmelCase_ = (PNDMScheduler,)
    lowerCAmelCase_ = (('num_inference_steps', 50),)
    def lowerCamelCase_ ( self : Tuple,**__A : Any ):
        '''Build a default scheduler config, overridable via kwargs.'''
        _lowerCamelCase : Optional[Any] = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**__A )
        return config
    def lowerCamelCase_ ( self : Dict,__A : Tuple=0,**__A : int ):
        '''Save/reload a configured scheduler and check step outputs match.'''
        _lowerCamelCase : Union[str, Any] = dict(self.forward_default_kwargs )
        _lowerCamelCase : Tuple = kwargs.pop("num_inference_steps",__A )
        _lowerCamelCase : Tuple = self.dummy_sample
        _lowerCamelCase : str = 0.1 * sample
        _lowerCamelCase : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            _lowerCamelCase : List[str] = self.get_scheduler_config(**__A )
            _lowerCamelCase : int = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # copy over dummy past residuals
            _lowerCamelCase : Optional[int] = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__A )
                _lowerCamelCase : Optional[Any] = scheduler_class.from_pretrained(__A )
                new_scheduler.set_timesteps(__A )
                # copy over dummy past residuals
                _lowerCamelCase : Union[str, Any] = dummy_past_residuals[:]
            _lowerCamelCase : Union[str, Any] = scheduler.step_prk(__A,__A,__A,**__A ).prev_sample
            _lowerCamelCase : int = new_scheduler.step_prk(__A,__A,__A,**__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            _lowerCamelCase : Tuple = scheduler.step_plms(__A,__A,__A,**__A ).prev_sample
            _lowerCamelCase : int = new_scheduler.step_plms(__A,__A,__A,**__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def lowerCamelCase_ ( self : str ):
        '''Intentionally skipped (base-class hook not applicable to PNDM).'''
        pass
    def lowerCamelCase_ ( self : List[Any],__A : List[Any]=0,**__A : Union[str, Any] ):
        '''Save/reload with default config and check step outputs match.'''
        _lowerCamelCase : Any = dict(self.forward_default_kwargs )
        _lowerCamelCase : Optional[int] = kwargs.pop("num_inference_steps",__A )
        _lowerCamelCase : Tuple = self.dummy_sample
        _lowerCamelCase : Union[str, Any] = 0.1 * sample
        _lowerCamelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            _lowerCamelCase : str = self.get_scheduler_config()
            _lowerCamelCase : Dict = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # copy over dummy past residuals (must be after setting timesteps)
            _lowerCamelCase : str = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__A )
                _lowerCamelCase : Optional[Any] = scheduler_class.from_pretrained(__A )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__A )
                # copy over dummy past residual (must be after setting timesteps)
                _lowerCamelCase : Any = dummy_past_residuals[:]
            _lowerCamelCase : List[Any] = scheduler.step_prk(__A,__A,__A,**__A ).prev_sample
            _lowerCamelCase : Union[str, Any] = new_scheduler.step_prk(__A,__A,__A,**__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            _lowerCamelCase : Any = scheduler.step_plms(__A,__A,__A,**__A ).prev_sample
            _lowerCamelCase : Optional[Any] = new_scheduler.step_plms(__A,__A,__A,**__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def lowerCamelCase_ ( self : int,**__A : Optional[Any] ):
        '''Run a full PRK + PLMS denoising loop and return the final sample.'''
        _lowerCamelCase : str = self.scheduler_classes[0]
        _lowerCamelCase : str = self.get_scheduler_config(**__A )
        _lowerCamelCase : str = scheduler_class(**__A )
        _lowerCamelCase : Any = 1_0
        _lowerCamelCase : Optional[Any] = self.dummy_model()
        _lowerCamelCase : Optional[int] = self.dummy_sample_deter
        scheduler.set_timesteps(__A )
        for i, t in enumerate(scheduler.prk_timesteps ):
            _lowerCamelCase : Dict = model(__A,__A )
            _lowerCamelCase : Optional[int] = scheduler.step_prk(__A,__A,__A ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            _lowerCamelCase : List[Any] = model(__A,__A )
            _lowerCamelCase : Dict = scheduler.step_plms(__A,__A,__A ).prev_sample
        return sample
    def lowerCamelCase_ ( self : int ):
        '''Shapes of step_prk/step_plms outputs must match the input sample.'''
        _lowerCamelCase : int = dict(self.forward_default_kwargs )
        _lowerCamelCase : Optional[Any] = kwargs.pop("num_inference_steps",__A )
        for scheduler_class in self.scheduler_classes:
            _lowerCamelCase : Optional[Any] = self.get_scheduler_config()
            _lowerCamelCase : List[Any] = scheduler_class(**__A )
            _lowerCamelCase : Any = self.dummy_sample
            _lowerCamelCase : Union[str, Any] = 0.1 * sample
            if num_inference_steps is not None and hasattr(__A,"set_timesteps" ):
                scheduler.set_timesteps(__A )
            elif num_inference_steps is not None and not hasattr(__A,"set_timesteps" ):
                _lowerCamelCase : Tuple = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            _lowerCamelCase : str = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            _lowerCamelCase : List[Any] = dummy_past_residuals[:]
            _lowerCamelCase : List[Any] = scheduler.step_prk(__A,0,__A,**__A ).prev_sample
            _lowerCamelCase : Dict = scheduler.step_prk(__A,1,__A,**__A ).prev_sample
            self.assertEqual(output_a.shape,sample.shape )
            self.assertEqual(output_a.shape,output_a.shape )
            _lowerCamelCase : Dict = scheduler.step_plms(__A,0,__A,**__A ).prev_sample
            _lowerCamelCase : List[Any] = scheduler.step_plms(__A,1,__A,**__A ).prev_sample
            self.assertEqual(output_a.shape,sample.shape )
            self.assertEqual(output_a.shape,output_a.shape )
    def lowerCamelCase_ ( self : Optional[Any] ):
        '''Config sweep over num_train_timesteps.'''
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=__A )
    def lowerCamelCase_ ( self : Optional[int] ):
        '''Config sweep over steps_offset, plus an exact timestep check.'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=__A )
        _lowerCamelCase : Tuple = self.scheduler_classes[0]
        _lowerCamelCase : List[str] = self.get_scheduler_config(steps_offset=1 )
        _lowerCamelCase : Optional[int] = scheduler_class(**__A )
        scheduler.set_timesteps(1_0 )
        assert torch.equal(
            scheduler.timesteps,torch.LongTensor(
                [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ),)
    def lowerCamelCase_ ( self : str ):
        '''Config sweep over (beta_start, beta_end) pairs.'''
        for beta_start, beta_end in zip([0.0001, 0.001],[0.002, 0.02] ):
            self.check_over_configs(beta_start=__A,beta_end=__A )
    def lowerCamelCase_ ( self : List[Any] ):
        '''Config sweep over beta schedules.'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__A )
    def lowerCamelCase_ ( self : str ):
        '''Config sweep over prediction types.'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__A )
    def lowerCamelCase_ ( self : List[Any] ):
        '''Forward sweep over individual time steps.'''
        for t in [1, 5, 1_0]:
            self.check_over_forward(time_step=__A )
    def lowerCamelCase_ ( self : Optional[int] ):
        '''Forward sweep over num_inference_steps values.'''
        for t, num_inference_steps in zip([1, 5, 1_0],[1_0, 5_0, 1_0_0] ):
            self.check_over_forward(num_inference_steps=__A )
    def lowerCamelCase_ ( self : Dict ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        _lowerCamelCase : List[str] = 2_7
        for scheduler_class in self.scheduler_classes:
            _lowerCamelCase : List[Any] = self.dummy_sample
            _lowerCamelCase : str = 0.1 * sample
            _lowerCamelCase : Dict = self.get_scheduler_config()
            _lowerCamelCase : Any = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                _lowerCamelCase : List[str] = scheduler.step_prk(__A,__A,__A ).prev_sample
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''Calling step_plms before set_timesteps must raise.'''
        with self.assertRaises(__A ):
            _lowerCamelCase : Optional[int] = self.scheduler_classes[0]
            _lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
            _lowerCamelCase : str = scheduler_class(**__A )
            scheduler.step_plms(self.dummy_sample,1,self.dummy_sample ).prev_sample
    def lowerCamelCase_ ( self : List[str] ):
        '''Regression values for the default full loop.'''
        _lowerCamelCase : str = self.full_loop()
        _lowerCamelCase : str = torch.sum(torch.abs(__A ) )
        _lowerCamelCase : Any = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 198.1318 ) < 1e-2
        assert abs(result_mean.item() - 0.2580 ) < 1e-3
    def lowerCamelCase_ ( self : List[Any] ):
        '''Regression values for the v-prediction full loop.'''
        _lowerCamelCase : Dict = self.full_loop(prediction_type="v_prediction" )
        _lowerCamelCase : int = torch.sum(torch.abs(__A ) )
        _lowerCamelCase : int = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 67.3986 ) < 1e-2
        assert abs(result_mean.item() - 0.0878 ) < 1e-3
    def lowerCamelCase_ ( self : Any ):
        # We specify different beta, so that the first alpha is 0.99
        _lowerCamelCase : Optional[Any] = self.full_loop(set_alpha_to_one=__A,beta_start=0.01 )
        _lowerCamelCase : str = torch.sum(torch.abs(__A ) )
        _lowerCamelCase : Dict = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3
    def lowerCamelCase_ ( self : Tuple ):
        # We specify different beta, so that the first alpha is 0.99
        _lowerCamelCase : Union[str, Any] = self.full_loop(set_alpha_to_one=__A,beta_start=0.01 )
        _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__A ) )
        _lowerCamelCase : List[Any] = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the SEW model (standard transformers pattern).
# Fixes: the import-structure dict and the model list were both bound to the
# mangled name `__A` (the second assignment clobbered the first instead of
# extending the dict), and the final line referenced `_import_structure`,
# which was never defined — an immediate NameError on import.
_import_structure = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes lazily.
    _import_structure["""modeling_sew"""] = [
        """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SEWForCTC""",
        """SEWForSequenceClassification""",
        """SEWModel""",
        """SEWPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see real imports; at runtime these are lazy.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 | 0 |
import pytest
import datasets
# Import fixture modules as plugins
# NOTE(review): this list of fixture plugin modules is bound to `UpperCamelCase`,
# but pytest only auto-loads plugin modules declared under the conventional
# name `pytest_plugins` — confirm the intended variable name.
UpperCamelCase = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
# NOTE(review): both parameters share the mangled name ``lowercase__`` — a
# SyntaxError in Python — and the loop reads an undefined ``items``. This
# reads like pytest's ``pytest_collection_modifyitems(config, items)`` hook;
# confirm the intended signature before running.
def A ( lowercase__ : int , lowercase__ : Optional[Any] ) -> int:
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
            continue
        item.add_marker(pytest.mark.unit )
def A ( lowercase__ : Optional[int] ) -> Any:
    """Register the custom ``torchaudio_latest`` pytest marker.

    ``lowercase__`` is the pytest config object (name mangled). Fix: the body
    read an undefined name ``config`` instead of the parameter.
    """
    lowercase__.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
# NOTE(review): the two parameters share the mangled name ``lowercase__``
# (a SyntaxError), the decorator argument ``lowercase__`` is undefined at
# module scope (presumably autouse=True), and the body reads
# ``tmp_path_factory``/``monkeypatch`` which are never bound. This reads like
# the datasets fixture that redirects every HF cache directory into a per-run
# tmp path — restore the original parameter names before running.
@pytest.fixture(autouse=lowercase__ )
def A ( lowercase__ : str , lowercase__ : int ) -> int:
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    UpperCamelCase__ :Union[str, Any] = tmp_path_factory.getbasetemp() / """cache"""
    UpperCamelCase__ :Union[str, Any] = test_hf_cache_home / """datasets"""
    UpperCamelCase__ :Optional[int] = test_hf_cache_home / """metrics"""
    UpperCamelCase__ :Tuple = test_hf_cache_home / """modules"""
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(lowercase__ ) )
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(lowercase__ ) )
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(lowercase__ ) )
    UpperCamelCase__ :Union[str, Any] = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(lowercase__ ) )
    UpperCamelCase__ :Optional[Any] = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowercase__ ) )
# NOTE(review): the decorator reads ``lowercase__`` at module scope, which is
# undefined — presumably this was ``autouse=True``.
@pytest.fixture(autouse=lowercase__ , scope="""session""" )
def A ( ) -> Optional[Any]:
    '''Disable datasets progress bars for the whole test session.'''
    datasets.disable_progress_bar()
# NOTE(review): the decorator argument ``lowercase__`` is undefined at module
# scope (presumably autouse=True), and the body calls ``.setattr`` on an
# undefined ``monkeypatch`` — the fixture parameter (mangled to
# ``lowercase__``) is presumably the monkeypatch fixture and the patched value
# should likely be False; confirm against the original conftest.
@pytest.fixture(autouse=lowercase__ )
def A ( lowercase__ : Tuple ) -> int:
    # don't take tests into account when counting downloads
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , lowercase__ )
@pytest.fixture
def A ( lowercase__ : Optional[int] ) -> Dict:
    """Silence SQLAlchemy 2.0 ``RemovedIn20Warning`` for not-yet-ported features.

    ``lowercase__`` is pytest's ``monkeypatch`` fixture (name mangled). Fix:
    the body called ``.setattr`` on the undefined name ``monkeypatch`` and
    passed the fixture itself as the patched value; patch the flag to True.
    """
    # To be removed once SQLAlchemy 2.0 is supported.
    lowercase__.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , True )
"""simple docstring"""
def __A (sentence , ngram_size ) ->list[str]:
    """Return all contiguous character n-grams of ``sentence`` with length
    ``ngram_size`` (empty list when the sentence is shorter than the size).

    Fix: both parameters were declared with the same mangled name, which is a
    SyntaxError in Python; they are now distinct.
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 93 | 0 |
"""simple docstring"""
import sys
def lowerCamelCase_( _lowerCamelCase ):
    """Matrix-chain multiplication order (CLRS dynamic programming).

    ``_lowerCamelCase`` is the dimension array ``p`` where matrix ``A_i`` has
    shape ``p[i-1] x p[i]``. Returns ``(matrix, sol)`` where ``matrix[a][b]``
    is the minimal number of scalar multiplications for the chain ``a..b``
    and ``sol[a][b]`` is the split point achieving it.

    Fix: the mangling bound every local to a single name, so later lines read
    the undefined names ``n``, ``array``, ``matrix`` and ``sol``; distinct
    locals restore the original algorithm.
    """
    arr = _lowerCamelCase
    n = len(arr )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize  # minimize over all split points
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + arr[a - 1] * arr[c] * arr[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def lowerCamelCase_( optimal_solution , i , j ) -> None:
    """Print the optimal parenthesization of the chain ``A_i .. A_j``.

    Fixes: the three parameters shared one mangled name (a SyntaxError), the
    body mixed the parameters with undefined names (``i``, ``j``,
    ``optimal_solution``), and recursion went through the undefined global
    ``print_optiomal_solution``; a nested helper makes the recursion
    self-contained and immune to module-level name shadowing.
    """
    def _recurse(lo , hi ):
        # Leaf: a single matrix in the chain.
        if lo == hi:
            print("A" + str(lo ) , end=" " )
        else:
            print("(" , end=" " )
            _recurse(lo , optimal_solution[lo][hi] )
            _recurse(optimal_solution[lo][hi] + 1 , hi )
            print(")" , end=" " )
    _recurse(i , j )
def lowerCamelCase_( ) -> List[Any]:
    '''Demo driver: compute and print the optimal matrix-chain order.

    NOTE(review): this is the third definition bound to ``lowerCamelCase_`` in
    this file (shadowing the two above), and the body calls
    ``matrix_chain_order`` / ``print_optiomal_solution`` and reads ``matrix``
    and ``n``, none of which are defined here; the ``__main__`` guard also
    calls an undefined ``main``. This will raise NameError if run — restore
    the original function names before executing.
    '''
    _lowerCamelCase : Tuple = [30, 35, 15, 5, 10, 20, 25]
    _lowerCamelCase : int = len(_lowerCamelCase )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    _lowerCamelCase, _lowerCamelCase : List[Any] = matrix_chain_order(_lowerCamelCase )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(_lowerCamelCase , 1 , n - 1 )
if __name__ == "__main__":
    main()
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``InstructBlipProcessor`` (tokenizer + image processor +
    Q-Former tokenizer).

    NOTE(review): the mangling renamed every test method to ``snake_case``,
    so each definition shadows the previous one and only the last is
    collected; the bodies also call helpers (``self.get_tokenizer``, ...)
    through those shadowed names — confirm the intended method names before
    running. Code is kept byte-identical; only comments/docstrings changed.
    """
    def snake_case ( self ):
        '''Create a tmp dir with a saved processor built from tiny models.'''
        lowerCAmelCase__ :Any = tempfile.mkdtemp()
        lowerCAmelCase__ :List[Any] = BlipImageProcessor()
        lowerCAmelCase__ :Union[str, Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        lowerCAmelCase__ :Union[str, Any] = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
        lowerCAmelCase__ :List[str] = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )
    def snake_case ( self , **__UpperCAmelCase ):
        '''Reload the saved processor and return its tokenizer.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
    def snake_case ( self , **__UpperCAmelCase ):
        '''Reload the saved processor and return its image processor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
    def snake_case ( self , **__UpperCAmelCase ):
        '''Reload the saved processor and return its Q-Former tokenizer.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
    def snake_case ( self ):
        '''Clean up the tmp dir.'''
        shutil.rmtree(self.tmpdirname )
    def snake_case ( self ):
        '''Build a batch of one random PIL image.'''
        lowerCAmelCase__ :Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        lowerCAmelCase__ :Dict = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case ( self ):
        '''Save then reload with extra kwargs; components must round-trip.'''
        lowerCAmelCase__ :List[str] = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        lowerCAmelCase__ :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        lowerCAmelCase__ :str = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
        lowerCAmelCase__ :Dict = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
        self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
    def snake_case ( self ):
        '''Processor image features must match the bare image processor.'''
        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :List[Any] = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
        lowerCAmelCase__ :str = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :Dict = self.prepare_image_inputs()
        lowerCAmelCase__ :List[str] = image_processor(__UpperCAmelCase , return_tensors='np' )
        lowerCAmelCase__ :Optional[Any] = processor(images=__UpperCAmelCase , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def snake_case ( self ):
        '''Processor text encoding must match both underlying tokenizers.'''
        lowerCAmelCase__ :Union[str, Any] = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
        lowerCAmelCase__ :Any = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = 'lower newer'
        lowerCAmelCase__ :Dict = processor(text=__UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
    def snake_case ( self ):
        '''Joint text+image call returns all expected keys; empty call raises.'''
        lowerCAmelCase__ :Optional[Any] = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :str = self.get_qformer_tokenizer()
        lowerCAmelCase__ :Dict = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = 'lower newer'
        lowerCAmelCase__ :Dict = self.prepare_image_inputs()
        lowerCAmelCase__ :Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
        # test if it raises when no input is passed
        with pytest.raises(__UpperCAmelCase ):
            processor()
    def snake_case ( self ):
        '''batch_decode must delegate to the tokenizer.'''
        lowerCAmelCase__ :Tuple = self.get_image_processor()
        lowerCAmelCase__ :Dict = self.get_tokenizer()
        lowerCAmelCase__ :Optional[int] = self.get_qformer_tokenizer()
        lowerCAmelCase__ :int = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowerCAmelCase__ :Tuple = processor.batch_decode(__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = tokenizer.batch_decode(__UpperCAmelCase )
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def snake_case ( self ):
        '''model_input_names-style check on the joint call's returned keys.'''
        lowerCAmelCase__ :Tuple = self.get_image_processor()
        lowerCAmelCase__ :Optional[int] = self.get_tokenizer()
        lowerCAmelCase__ :Dict = self.get_qformer_tokenizer()
        lowerCAmelCase__ :Optional[Any] = InstructBlipProcessor(
            tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[int] = 'lower newer'
        lowerCAmelCase__ :Optional[int] = self.prepare_image_inputs()
        lowerCAmelCase__ :int = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()  # make RNG-dependent torch ops deterministic so the pinned slices below are reproducible
class _UpperCamelCase( unittest.TestCase ):
    """Fast (tiny-model) tests for ``StableDiffusionUpscalePipeline``.

    NOTE(review): obfuscation has rebound every local assignment to ``__a``
    and every method/property to ``__lowerCAmelCase``, so names read later in
    the same method (``batch_size``, ``sd_pipe``, ``low_res_image``,
    ``image_slice``, ...) and the ``self.dummy_*`` properties no longer
    resolve — as written these methods raise NameError/AttributeError.
    ``SCREAMING_SNAKE_CASE__`` and ``np.uinta``/``torch.floataa`` are likewise
    mangled placeholders (presumably the original argument names, ``np.uint8``
    and ``torch.float16``) — confirm against the upstream diffusers test.
    """
    def __lowerCAmelCase ( self : str ):
        """Release GPU memory after each test (tearDown-style cleanup)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def __lowerCAmelCase ( self : str ):
        """Deterministic dummy input image tensor (seeded RNG)."""
        __a : List[str] = 1
        __a : List[Any] = 3
        __a : str = (3_2, 3_2)
        # NOTE(review): `batch_size`, `num_channels`, `sizes` and `image` below are
        # the presumed original names of the `__a` bindings above — TODO confirm.
        __a : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
        return image
    @property
    def __lowerCAmelCase ( self : Tuple ):
        """Tiny class-conditioned UNet2D matching the upscale pipeline's 7-channel input."""
        torch.manual_seed(0 )
        __a : Optional[int] = UNetaDConditionModel(
            block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=SCREAMING_SNAKE_CASE__ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
        return model
    @property
    def __lowerCAmelCase ( self : Optional[Any] ):
        """Tiny AutoencoderKL VAE."""
        torch.manual_seed(0 )
        __a : Optional[Any] = AutoencoderKL(
            block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model
    @property
    def __lowerCAmelCase ( self : Tuple ):
        """Tiny CLIP text encoder built from a seeded config."""
        torch.manual_seed(0 )
        __a : Dict = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
        return CLIPTextModel(SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : int ):
        """End-to-end smoke test: output shape and a pinned 3x3 pixel slice, dict vs tuple return."""
        __a : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __a : int = self.dummy_cond_unet_upscale
        __a : Any = DDPMScheduler()
        __a : Any = DDIMScheduler(prediction_type='v_prediction' )
        __a : Any = self.dummy_vae
        __a : List[str] = self.dummy_text_encoder
        __a : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        __a : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : Optional[Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('RGB' ).resize((6_4, 6_4) )
        # make sure here that pndm scheduler skips prk
        __a : str = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE__ , low_res_scheduler=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , max_noise_level=3_5_0 , )
        __a : List[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        __a : Union[str, Any] = 'A painting of a squirrel eating a burger'
        __a : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
        __a : List[str] = sd_pipe(
            [prompt] , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
        __a : Tuple = output.images
        # same seed again so the tuple-return path must reproduce the dict-return path
        __a : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
        __a : int = sd_pipe(
            [prompt] , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
        __a : Dict = image[0, -3:, -3:, -1]
        __a : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        # upscaler multiplies spatial size by 4
        __a : Any = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        __a : Dict = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def __lowerCAmelCase ( self : Union[str, Any] ):
        """Batched prompts and ``num_images_per_prompt`` both yield 2 images."""
        __a : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
        __a : List[str] = self.dummy_cond_unet_upscale
        __a : Dict = DDPMScheduler()
        __a : Dict = DDIMScheduler(prediction_type='v_prediction' )
        __a : int = self.dummy_vae
        __a : Optional[int] = self.dummy_text_encoder
        __a : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        __a : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : int = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('RGB' ).resize((6_4, 6_4) )
        # make sure here that pndm scheduler skips prk
        __a : Dict = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE__ , low_res_scheduler=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , max_noise_level=3_5_0 , )
        __a : int = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        __a : List[str] = 'A painting of a squirrel eating a burger'
        __a : str = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
        __a : Any = output.images
        assert image.shape[0] == 2
        __a : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
        __a : int = sd_pipe(
            [prompt] , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , )
        __a : Optional[Any] = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def __lowerCAmelCase ( self : Dict ):
        """fp16 variant (VAE kept fp32 — it overflows in fp16) still yields the right shape."""
        __a : Dict = self.dummy_cond_unet_upscale
        __a : Any = DDPMScheduler()
        __a : Tuple = DDIMScheduler(prediction_type='v_prediction' )
        __a : Union[str, Any] = self.dummy_vae
        __a : Dict = self.dummy_text_encoder
        __a : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        __a : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : Dict = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert('RGB' ).resize((6_4, 6_4) )
        # put models in fp16, except vae as it overflows in fp16
        __a : str = unet.half()
        __a : Any = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        __a : Optional[Any] = StableDiffusionUpscalePipeline(
            unet=SCREAMING_SNAKE_CASE__ , low_res_scheduler=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , max_noise_level=3_5_0 , )
        __a : int = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        __a : str = 'A painting of a squirrel eating a burger'
        __a : Tuple = torch.manual_seed(0 )
        __a : int = sd_pipe(
            [prompt] , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='np' , ).images
        __a : List[str] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _UpperCamelCase( unittest.TestCase ):
    """Slow GPU integration tests against the real stabilityai/stable-diffusion-x4-upscaler.

    NOTE(review): the same obfuscation caveat applies as in the fast-test
    class above — ``pipe``, ``expected_image``, ``image`` and ``mem_bytes``
    are the presumed original names of the mangled ``__a`` bindings, and
    ``torch.floataa`` is presumably ``torch.float16``; confirm against the
    upstream diffusers test.
    """
    def __lowerCAmelCase ( self : Any ):
        """Release GPU memory after each test."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __lowerCAmelCase ( self : int ):
        """fp32 upscale of a low-res cat must match the stored reference image within 1e-3."""
        __a : Optional[int] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        __a : int = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat.npy' )
        __a : List[Any] = 'stabilityai/stable-diffusion-x4-upscaler'
        __a : List[Any] = StableDiffusionUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE__ )
        pipe.to(SCREAMING_SNAKE_CASE__ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        pipe.enable_attention_slicing()
        __a : Any = 'a cat sitting on a park bench'
        __a : List[str] = torch.manual_seed(0 )
        __a : List[Any] = pipe(
            prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
        __a : Union[str, Any] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 1e-3
    def __lowerCAmelCase ( self : Tuple ):
        """fp16 upscale must match the fp16 reference within a looser 5e-1 tolerance."""
        __a : List[Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        __a : List[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat_fp16.npy' )
        __a : Dict = 'stabilityai/stable-diffusion-x4-upscaler'
        __a : Any = StableDiffusionUpscalePipeline.from_pretrained(
            SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa , )
        pipe.to(SCREAMING_SNAKE_CASE__ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        pipe.enable_attention_slicing()
        __a : Optional[Any] = 'a cat sitting on a park bench'
        __a : Any = torch.manual_seed(0 )
        __a : List[Any] = pipe(
            prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='np' , )
        __a : Tuple = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
    def __lowerCAmelCase ( self : Any ):
        """With attention slicing + sequential CPU offload, peak GPU memory stays under 2.9 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __a : int = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        __a : Dict = 'stabilityai/stable-diffusion-x4-upscaler'
        __a : str = StableDiffusionUpscalePipeline.from_pretrained(
            SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa , )
        pipe.to(SCREAMING_SNAKE_CASE__ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        __a : List[Any] = 'a cat sitting on a park bench'
        __a : Union[str, Any] = torch.manual_seed(0 )
        __a : Dict = pipe(
            prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , output_type='np' , )
        __a : Optional[int] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 1_0**9
| 47 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Maps each pip-style comparison operator to its predicate; used by the
# version checks below.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Raise unless ``got_ver <op> want_ver`` holds.

    BUG FIX: the obfuscated original declared six identically-named
    parameters (a SyntaxError) and referenced an undefined ``ops`` name.

    Args:
        op: one of the keys of ``ops``.
        got_ver: installed version string (``None`` if lookup failed).
        want_ver: required version string (``None`` for unversioned checks).
        requirement: the full requirement spec, used in error messages.
        pkg: package name, used in error messages.
        hint: extra text appended to the ImportError message.

    Raises:
        ValueError: if either version string is missing.
        ImportError: if the installed version does not satisfy the requirement.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}." )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )


def require_version(requirement, hint=None):
    """Check that an installed package satisfies a pip-style requirement string.

    Accepts either a bare package name (``"tokenizers"``) or a versioned spec
    with one or more comma-separated clauses (``"tokenizers>=0.11,<0.13"``).
    The pseudo-package ``"python"`` is compared against the running
    interpreter's version instead of installed metadata.

    BUG FIX: restores the real bindings the obfuscated original lost —
    duplicate parameter names (SyntaxError), results bound to throwaway
    locals, ``str(x)`` mangled to stringify the requirement, and calls to the
    undefined ``_compare_versions``. Also initializes ``wanted`` up front so a
    bare ``"python"`` requirement no longer hits an unbound local.

    Raises:
        ValueError: if the requirement string is malformed or uses an unknown operator.
        importlib.metadata.PackageNotFoundError: if the package is not installed.
        ImportError: if an installed version fails a comparison clause.
    """
    hint = f"\n{hint}" if hint is not None else ""
    wanted = {}  # op -> wanted version, empty for unversioned requirements
    # non-versioned check, e.g. "tokenizers"
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}" )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: "python" is checked against the running interpreter
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}" )

    # check that the right version is installed if a version number or range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """Like :func:`require_version`, with a hint pointing at transformers dev installs."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)


# Backward-compatible module alias: in the obfuscated original every
# definition above was named ``__A`` (each shadowing the last), so external
# code saw the final function under that name.
__A = require_version_core
| 93 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A ( unittest.TestCase ):
    """Configuration holder used to build a LayoutLMv3 image processor in tests.

    BUG FIX: the obfuscated original declared nine identically-named
    parameters (a SyntaxError) and bound every argument to a throwaway local,
    so no instance state was stored even though the accessor below reads
    ``self.do_resize``/``self.size``/``self.apply_ocr``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # default target size matches the original literal
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


# Backward-compatible alias: the sibling test class refers to this tester by
# its original (pre-obfuscation) name.
LayoutLMvaImageProcessingTester = A
@require_torch
@require_pytesseract
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Tests for ``LayoutLMvaImageProcessor``: config, resizing, batching over PIL/numpy/torch inputs, and Tesseract OCR output.

    NOTE(review): ``SCREAMING_SNAKE_CASE__`` in the bases and
    ``LayoutLMvaImageProcessingTester`` below do not resolve in this file —
    presumably ``ImageProcessingSavingTestMixin`` (imported above) and the
    sibling tester class. Likewise the setUp-style method binds the tester to
    a throwaway local instead of ``self.image_processor_tester``, which the
    rest of the class reads. Confirm against the upstream transformers test.
    """
    snake_case__ :Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def __SCREAMING_SNAKE_CASE ( self : Any ):
        """Create the shared tester fixture (setUp-style)."""
        lowerCAmelCase__ = LayoutLMvaImageProcessingTester(self )
    @property
    def __SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Kwargs dict used to instantiate the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def __SCREAMING_SNAKE_CASE ( self : List[str] ):
        """The processor exposes the expected configuration attributes."""
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__magic_name__ , "do_resize" ) )
        self.assertTrue(hasattr(__magic_name__ , "size" ) )
        self.assertTrue(hasattr(__magic_name__ , "apply_ocr" ) )
    def __SCREAMING_SNAKE_CASE ( self : List[str] ):
        """``from_dict`` honors both the stored size and a ``size`` override."""
        lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def __SCREAMING_SNAKE_CASE ( self : Any ):
        """Intentionally empty placeholder (kept for interface parity)."""
        pass
    def __SCREAMING_SNAKE_CASE ( self : List[str] ):
        """PIL inputs: single and batched calls produce correctly-shaped tensors plus OCR words/boxes."""
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , Image.Image )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , __magic_name__ )
        self.assertIsInstance(encoding.boxes , __magic_name__ )
        # Test batched
        lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """numpy inputs: single and batched calls produce correctly-shaped tensors."""
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , np.ndarray )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def __SCREAMING_SNAKE_CASE ( self : int ):
        """torch inputs: single and batched calls produce correctly-shaped tensors."""
        lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
        for image in image_inputs:
            self.assertIsInstance(__magic_name__ , torch.Tensor )
        # Test not batched input
        lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def __SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Integration test: OCR output on a real DocVQA fixture matches Tesseract 4.1.1 ground truth."""
        lowerCAmelCase__ = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        lowerCAmelCase__ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        lowerCAmelCase__ = Image.open(ds[0]["file"] ).convert("RGB" )
        lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        lowerCAmelCase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        lowerCAmelCase__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 
436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , __magic_name__ )
        self.assertListEqual(encoding.boxes , __magic_name__ )
        # with apply_OCR = False
        lowerCAmelCase__ = LayoutLMvaImageProcessor(apply_ocr=__magic_name__ )
        lowerCAmelCase__ = image_processing(__magic_name__ , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 48 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase ( a , unittest.TestCase ):
    """Fast tests for ``OnnxStableDiffusionImg2ImgPipeline`` against a tiny hub checkpoint,
    one per scheduler (PNDM, LMS, Euler, EulerAncestral, DPMSolverMultistep).

    NOTE(review): the base class ``a`` and the attribute name ``__magic_name__``
    are obfuscation placeholders (presumably ``OnnxPipelineTesterMixin`` and
    ``hub_checkpoint`` — the methods below read ``self.hub_checkpoint``).
    Likewise every local is rebound to ``lowerCAmelCase__`` while later lines
    read the original names (``image``, ``generator``, ``pipe``, ``inputs``,
    ``image_slice``, ``expected_slice``), so these methods raise NameError as
    written; confirm against the upstream diffusers test.
    """
    __magic_name__ :int = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
    def snake_case ( self , __UpperCAmelCase=0 ):
        """Build the standard seeded img2img call kwargs (128x128 input, 3 steps)."""
        lowerCAmelCase__ :List[str] = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__UpperCAmelCase ) )
        lowerCAmelCase__ :List[str] = np.random.RandomState(__UpperCAmelCase )
        lowerCAmelCase__ :List[str] = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def snake_case ( self ):
        """Default scheduler: output shape and pinned pixel slice."""
        lowerCAmelCase__ :Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Union[str, Any] = self.get_dummy_inputs()
        lowerCAmelCase__ :Optional[int] = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :Union[str, Any] = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        """PNDM scheduler (skip_prk_steps): pinned slice."""
        lowerCAmelCase__ :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = self.get_dummy_inputs()
        lowerCAmelCase__ :Optional[Any] = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :int = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        """LMSDiscrete scheduler, with a warmup pass first: pinned slice."""
        lowerCAmelCase__ :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        # warmup pass to apply optimizations
        lowerCAmelCase__ :List[Any] = pipe(**self.get_dummy_inputs() )
        lowerCAmelCase__ :Tuple = self.get_dummy_inputs()
        lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :Union[str, Any] = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        """EulerDiscrete scheduler: pinned slice."""
        lowerCAmelCase__ :Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Any = self.get_dummy_inputs()
        lowerCAmelCase__ :List[str] = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :Optional[int] = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        """EulerAncestralDiscrete scheduler: pinned slice."""
        lowerCAmelCase__ :str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = self.get_dummy_inputs()
        lowerCAmelCase__ :Any = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :int = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def snake_case ( self ):
        """DPMSolverMultistep scheduler: pinned slice."""
        lowerCAmelCase__ :List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        lowerCAmelCase__ :List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        lowerCAmelCase__ :Any = self.get_dummy_inputs()
        lowerCAmelCase__ :List[Any] = pipe(**__UpperCAmelCase ).images
        lowerCAmelCase__ :int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        lowerCAmelCase__ :Optional[Any] = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX Stable Diffusion img2img pipeline."""

    @property
    def gpu_provider(self):
        # Named `gpu_provider` because the tests below read `self.gpu_provider`;
        # ORT expects a (provider_name, provider_options) tuple.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        # Session options for onnxruntime; the tests below read `self.gpu_options`.
        options = ort.SessionOptions()
        # NOTE(review): the obfuscated source lost the attribute name here; upstream
        # diffusers disables the memory-pattern optimization — confirm.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        """Full img2img run with the default (PNDM) scheduler against a reference slice."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        """Full img2img run with the LMS discrete scheduler against a reference slice."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 93 | 0 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
# IndicGLUE metric documentation strings. Each literal is also bound to the
# canonical name (`_CITATION`, `_DESCRIPTION`, `_KWARGS_DESCRIPTION`) that the
# `add_start_docstrings` decorator further down the file actually references;
# the obfuscated `_lowercase` binding is kept for backward compatibility.
_lowercase : str = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_CITATION = _lowercase
_lowercase : str = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_DESCRIPTION = _lowercase
_lowercase : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
_KWARGS_DESCRIPTION = _lowercase
def simple_accuracy(preds, labels):
    """Fraction of positions where ``preds`` equals ``labels``, as a plain float.

    Restored under its real name — the metric class below calls
    ``simple_accuracy``. Generalized to accept any equal-shape array-likes
    (lists included), which is backward-compatible with ndarray inputs.
    """
    preds = np.asarray(preds)
    labels = np.asarray(labels)
    return float((preds == labels).mean())


# Backward-compatible binding for the obfuscated module-level name.
lowercase__ = simple_accuracy
def acc_and_fa(preds, labels):
    """Return ``{"accuracy": ..., "f1": ...}`` for integer label predictions.

    Restored under its real name — the metric class below calls ``acc_and_fa``.
    The accuracy is computed inline so this function does not depend on a
    sibling definition being fixed.
    """
    preds = np.asarray(preds)
    labels = np.asarray(labels)
    acc = float((preds == labels).mean())
    # NOTE(review): `fa_score` comes from the garbled `sklearn.metrics` import at
    # the top of the file — presumably sklearn's `f1_score`; verify the import.
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


# Backward-compatible binding for the obfuscated module-level name.
lowercase__ = acc_and_fa
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    For each English sentence vector, ranks all Indian-language sentence
    vectors by cosine distance (after mean-centering each side) and counts a
    hit when the gold-aligned index appears among the 10 nearest. Returns the
    hit rate as a float. Restored under its real name — the metric class below
    calls ``precision_at_aa``.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering makes the cosine ranking translation-invariant per space
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


# Backward-compatible binding for the obfuscated module-level name.
lowercase__ = precision_at_aa
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """IndicGLUE metric: accuracy, accuracy+F1, or precision@10 depending on the subset.

    Methods are named ``_info`` / ``_compute`` (with distinct parameter names)
    because that is the hook contract ``datasets.Metric`` dispatches to; the
    obfuscated originals used duplicate parameter names, which is a
    SyntaxError in Python.
    """

    def _info(self):
        """Declare the feature schema for the configured subset."""
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        # cvit-mkb-clsr is a retrieval task: inputs are float vectors, not ids.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        """Dispatch to the scoring function matching the configured subset."""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| 49 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Checkpoints exercised by the TF tokenizer tests below. The test class refers
# to `TOKENIZER_CHECKPOINTS`, so the canonical names must actually exist (the
# obfuscated source bound both values to the same throwaway name `__A`).
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
# Backward-compatible binding for the obfuscated module-level name.
__A = TINY_MODEL_CHECKPOINT
if is_tf_available():

    class ModelToSave(tf.Module):
        """Bundles a TF tokenizer with a GPT-2 LM head so both export as one SavedModel.

        Named ``ModelToSave`` because the SavedModel test below instantiates it
        by that name.
        """

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # Build the LM from the tiny checkpoint's config, not from the
            # tokenizer object (the obfuscated source passed the tokenizer here).
            config = AutoConfig.from_pretrained("gpt2")
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            """Tokenize raw strings and return the LM logits."""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            # Positions with id > 0 are real tokens; the rest is ragged padding.
            # NOTE(review): the obfuscated dtype was unrecoverable (`tf.intaa`);
            # int32 is assumed — confirm against the model's expected mask dtype.
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs

    # Backward-compatible binding for the obfuscated class name.
    _lowerCAmelCase = ModelToSave
@require_tf
@require_keras_nlp
class _lowerCAmelCase ( unittest.TestCase ):
    """Checks that `TFGPTaTokenizer` matches the Python tokenizer and survives
    tf.function compilation, SavedModel round-trips, config round-trips and
    max_length truncation/padding."""

    def setUp(self):
        # Must be named `setUp` (the obfuscated `snake_case` was never invoked
        # by unittest, leaving every test without fixtures).
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        """The in-graph tokenizer must emit exactly the Python tokenizer's ids."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf')
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    # NOTE(review): the obfuscated dtype (`tf.intaa`) is assumed
                    # to be int64 — confirm against the TF tokenizer's output dtype.
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        """Eager and tf.function-compiled tokenization must agree."""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        """Tokenizer+model must produce identical logits after a SavedModel round-trip."""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        """A tokenizer rebuilt via get_config/from_config must behave identically."""
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        """`max_length` must be honored exactly for several lengths."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run (the obfuscated source dropped this into a dead local)
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['input_ids'].numpy().shape[1]
                assert out_length == max_length
| 93 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants. The BartPho tokenizer class below refers to
# `logger`, `SPIECE_UNDERLINE`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`
# and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, so those canonical names must
# exist (the obfuscated source rebound them all to `UpperCamelCase`).
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
    },
    'monolingual_vocab_file': {
        'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 10_24}
# Backward-compatible binding for the obfuscated name (its last value).
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class UpperCamelCase__ (PreTrainedTokenizer):
    """BARTpho (syllable-level) tokenizer: SentencePiece plus a reduced fairseq
    vocabulary loaded from ``monolingual_vocab_file``.

    Signatures are restored with distinct, meaningful parameter names — the
    obfuscated source repeated the same parameter name inside one ``def``,
    which is a SyntaxError — and the private hooks use the names the
    ``PreTrainedTokenizer`` base class dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; keep its serialized proto.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """All-zero token type ids (BARTpho does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        # Size of the *reduced* fairseq vocab, not the full SentencePiece model.
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        # Unknown pieces map to <unk>; only tokens in the reduced vocab get ids.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        # Join SentencePiece pieces and turn the "▁" word-boundary marker back
        # into spaces (the obfuscated source passed the token list to replace()).
        out_string = "".join(tokens).replace("▁", " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model and the reduced dict.txt to ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F'''{str(token)} \n''')
        return out_vocab_file, out_monolingual_vocab_file
| 50 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an ADE20k `UperNetConfig` for the given ConvNeXt size.

    Restored under its real name — `convert_upernet_checkpoint` calls
    `get_upernet_config`. Also restores the id2label comprehension (the
    obfuscated source applied `int()` to the whole dict) and passes the
    standard `id2label`/`label2id` config kwargs.
    """
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4']
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config


# Backward-compatible binding for the obfuscated name.
__A = get_upernet_config
def create_rename_keys(config):
    """Return (mmseg_key, hf_key) pairs mapping the original UperNet+ConvNeXt
    state dict onto the transformers layout.

    Restored under its real name — `convert_upernet_checkpoint` calls
    `create_rename_keys`. Only reads `config.backbone_config.depths`.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight'))
    rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias'))
    rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight'))
    rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias'))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter"))
            rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight"))
            rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias"))
            rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight"))
            rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias"))
            rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight"))
            rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias"))
            rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight"))
            rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias"))
        if i > 0:
            # stage 0's downsampling is the stem handled above
            rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight"))
            rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias"))
            rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight"))
            rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias"))
        rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
            ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
            ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
            ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
        ])
    # fmt: on
    return rename_keys


# Backward-compatible binding for the obfuscated name.
__A = create_rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Restored under its real name — `convert_upernet_checkpoint` calls
    `rename_key`. Raises ``KeyError`` if ``old`` is missing.
    """
    val = dct.pop(old)
    dct[new] = val


# Backward-compatible binding for the obfuscated name.
__A = rename_key
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet+ConvNeXt checkpoint to the HF format.

    Downloads the original weights, renames every key, verifies the logits on
    a fixture image, then optionally saves and/or pushes model + processor.
    Restored local-variable names — the obfuscated source read
    `model_name_to_url`, `image_slice` etc. without ever defining them.
    """
    model_name_to_url = {
        'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
        'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
        'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
        'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
        'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    # Reference top-left logits slices, one per checkpoint size.
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print('Logits:', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(F"openmmlab/{model_name}")
        processor.push_to_hub(F"openmmlab/{model_name}")


# Backward-compatible binding for the obfuscated name.
__A = convert_upernet_checkpoint
if __name__ == "__main__":
    # CLI entry point; the obfuscated source assigned the parser and the parsed
    # args to `__A` but then read the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""upernet-convnext-tiny""",
        type=str,
        choices=[F'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
        help="""Name of the ConvNext UperNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 93 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase__ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for `VideoToVideoSDPipeline` using tiny randomly-initialized components."""

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )

    def get_dummy_components(self):
        """Build the smallest set of components the pipeline needs, seeded for determinism."""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') ,
            up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') ,
            cross_attention_dim=32 ,
            attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 ,
            beta_end=0.012 ,
            beta_schedule='scaled_linear' ,
            clip_sample=False ,
            set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,
            latent_channels=4 ,
            sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1e-0_5 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1000 ,
            hidden_act='gelu' ,
            projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0 ):
        """Return a minimal call-argument dict for the pipeline; `seed` keeps the video/generator deterministic."""
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    def test_text_to_video_default_case(self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() ,
        reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=5e-3 )

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_consistent(self ):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
    def test_inference_batch_single_identical(self ):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
    def test_num_images_per_prompt(self ):
        pass

    def test_progress_bar(self ):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests ( unittest.TestCase ):
    """Slow integration test against real `cerspense/zeroscope_v2_XL` weights (requires CUDA).

    Renamed from the duplicate class name so it no longer shadows the fast-test class above.
    """

    def test_two_step_model(self ):
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1024, 576) , generator=generator )
        video = video.to('cuda' )
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type='pt' ).frames
        expected_array = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
        # compare the last 5 values of the very first pixel row against the recorded reference
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 51 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module-level logger plus the vocab/merges/tokenizer maps consumed by the fast tokenizer class below.
# The original bound all four objects to the same name `__A`, so the class attributes
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) and `logger` were undefined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :str = VOCAB_FILES_NAMES
__magic_name__ :List[Any] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ :str = ["""input_ids""", """attention_mask"""]
__magic_name__ :Any = RobertaTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , __UpperCAmelCase=True , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
lowerCAmelCase__ :Optional[int] = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
lowerCAmelCase__ :List[Any] = add_prefix_space
lowerCAmelCase__ :str = pre_tok_class(**__UpperCAmelCase )
lowerCAmelCase__ :List[str] = add_prefix_space
lowerCAmelCase__ :str = 'post_processor'
lowerCAmelCase__ :Optional[Any] = getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
if tokenizer_component_instance:
lowerCAmelCase__ :Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase__ :Any = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase__ :int = tuple(state['cls'] )
lowerCAmelCase__ :List[Any] = False
if state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
lowerCAmelCase__ :Union[str, Any] = add_prefix_space
lowerCAmelCase__ :Any = True
if state.get('trim_offsets' , __UpperCAmelCase ) != trim_offsets:
lowerCAmelCase__ :Union[str, Any] = trim_offsets
lowerCAmelCase__ :Optional[int] = True
if changes_to_apply:
lowerCAmelCase__ :str = getattr(__UpperCAmelCase , state.pop('type' ) )
lowerCAmelCase__ :Any = component_class(**__UpperCAmelCase )
setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
@property
def snake_case ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else value
lowerCAmelCase__ :List[str] = value
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
lowerCAmelCase__ :str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = [self.sep_token_id]
lowerCAmelCase__ :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 93 | 0 |
"""simple docstring"""
import baseaa
def __A ( a_ :str) -> bytes:
return baseaa.baaencode(string.encode('''utf-8'''))
def __A ( a_ :bytes) -> str:
return baseaa.baadecode(a_).decode('''utf-8''')
if __name__ == "__main__":
A = '''Hello World!'''
A = baseaa_encode(test)
print(encoded)
A = baseaa_decode(encoded)
print(decoded) | 52 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k±1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
__A = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that cannot be written as prime + 2*square.

    Raises ValueError if `n` is not a positive integer.
    """
    if not isinstance(n , int ):
        raise ValueError('n must be an integer' )
    if n <= 0:
        raise ValueError('n must be >= 0' )

    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            # A decomposition prime + 2*i^2 exists — this candidate satisfies the conjecture.
            if is_prime(rest ):
                break
            i += 1
        else:
            # while exhausted without a break: no decomposition found
            list_nums.append(odd_composites[num] )
            if len(list_nums ) == n:
                return list_nums
    return []
def solution() -> int:
    """Project Euler 46: smallest odd composite with no prime + 2*square decomposition."""
    return compute_nums(1 )[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 93 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
    """Builds tiny RegNet configs/inputs for the model tests.

    Renamed to `FlaxRegNetModelTester` to match its instantiation in the model test
    class's setUp; the obfuscated original never stored its constructor arguments
    on `self`, so every helper method raised AttributeError.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=3_2,
        num_channels=3,
        embeddings_size=1_0,
        hidden_sizes=[1_0, 2_0, 3_0, 4_0],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )

    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values

    def get_config(self ):
        return RegNetConfig(
            num_channels=self.num_channels ,
            embeddings_size=self.embeddings_size ,
            hidden_sizes=self.hidden_sizes ,
            depths=self.depths ,
            hidden_act=self.hidden_act ,
            num_labels=self.num_labels ,
            image_size=self.image_size , )

    def create_and_check_model(self , config , pixel_values ):
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape ,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )

    def create_and_check_for_image_classification(self , config , pixel_values ):
        num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_labels) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class _UpperCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    """Model tests for Flax RegNet: config sanity, forward signature, hidden states, JIT parity."""

    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self ) -> None:
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )

    def test_config(self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self ):
        # common properties are exercised elsewhere for vision models
        return

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes(self ):
        pass

    def test_forward_signature(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # stem output + one per stage
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_jit_compilation(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    """Load the fixture COCO image used by the integration test (name matches its call site)."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest ( unittest.TestCase ):
    """Slow integration test against real facebook/regnet-y-040 weights.

    Renamed from the duplicate class name so it no longer shadows the model test class.
    """

    @cached_property
    def default_image_processor(self ):
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self ):
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='np' )

        outputs = model(**inputs )

        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 53 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """Split on punctuation, then whitespace-split each piece into a list of words.

    The original's parameter was renamed away from `str_`, which the body still used.
    """
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , str_ )]
def to_simple_case(str_: str) -> str:
    """Collapse the words of `str_` into PascalCase with no separators."""
    string_split = split_input(str_ )
    return "".join(
        [''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of `text` with `separator`, fully upper- or lower-cased."""
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str) -> str:
    """PascalCase: capitalized words, no separator."""
    return to_simple_case(text )
def to_camel_case(text: str) -> str:
    """camelCase: PascalCase with a lowercased first letter."""
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str, upper: bool) -> str:
    """snake_case (or SCREAMING_SNAKE_CASE when `upper` is True)."""
    return to_complex_case(text , upper , '_' )
def to_kebab_case(text: str, upper: bool) -> str:
    """kebab-case (or UPPER-KEBAB-CASE when `upper` is True)."""
    return to_complex_case(text , upper , '-' )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 93 | 0 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and config-archive map; the original bound both to `__lowercase`,
# so the second assignment silently discarded the logger.
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class A ( PretrainedConfig ):
    """Configuration class for the XLM-ProphetNet model.

    Reconstructed from the obfuscated original, whose four class attributes all
    shared one name and whose `@num_hidden_layers.setter` referenced an
    undefined property.
    """

    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 3_0522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ) -> Optional[int]:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self ) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self , value ) -> Tuple:
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`." )
| 54 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
__A = """"""
__A = """"""
__A = """"""
__A = """"""
def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent tweets (Twitter caps this at ~3200) into new_<name>_tweets.csv."""
    # authorize with the module-level credentials and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f"getting tweets before {oldest}" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets )} tweets downloaded so far" )

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv" , 'w' ) as f:
        writer = csv.writer(f )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 93 | 0 |
# Digit-group sizes used for jumping (k in [2, 20]), powers of ten, and the jump memo
# shared by next_term/compute/add below. The original bound all three to one name.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    """Advance the digit-sum sequence from term i toward term n using cached jumps.

    `a_i` holds the current term's digits, least-significant first. Returns
    (diff, dn): the total value added and the number of terms advanced.
    The original declared four parameters all named `a_` — a SyntaxError.
    """
    # ds_b: digit sum of the high part b; c: value of the low k digits
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b )

    if sub_memo is not None:
        jumps = sub_memo.get(c )

        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _jump_k = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c, a_i[j] = divmod(new_c , 1_0 )
                if new_c > 0:
                    add(a_i , k , new_c )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute(a_i, k, i, n):
    """Advance the digit-sum sequence term by term until term n or until a carry
    escapes the low k digits. Mutates `a_i` in place; returns (diff, terms_advanced).
    The original declared four parameters all named `a_` — a SyntaxError.
    """
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 1_0 )
            ds_c += a_i[j]

        # a carry left the low k digits: stop and let the caller fold it in
        if addend > 0:
            break

    if addend > 0:
        add(a_i , k , addend )
    return diff, i - start_i
def add(digits, k, addend):
    """Add `addend` into the little-endian `digits` list starting at position k,
    propagating carries and appending new high digits as needed. In-place.
    """
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 1_0:
            quotient, digits[j] = divmod(s , 1_0 )
            addend = addend // 1_0 + quotient
        else:
            digits[j] = s
            addend = addend // 1_0

        if addend == 0:
            break

    # append any remaining carry as new high-order digits
    while addend > 0:
        addend, digit = divmod(addend , 1_0 )
        digits.append(digit )
def solution(n = 1_0**1_5 ) -> int:
    """Return term n of the sequence a(i+1) = a(i) + digitsum(a(i)), a(1) = 1 (Project Euler 551)."""
    # digits of the current term, least-significant first
    digits = [1]
    i = 1
    dn = 0  # number of terms advanced so far
    while True:
        diff, terms_jumped = next_term(digits , 2_0 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break

    # rebuild the integer from its little-endian digits
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 1_0**j
    return a_n


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 55 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (tiny-model) tests for `StableUnCLIPPipeline`.

    Restored from the identifier-mangled original: both test classes in this
    file collided on the name `_lowerCAmelCase`, the mixin bases were
    collapsed to `a`, and every attribute/local was renamed, leaving the
    module unrunnable.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build the prior + denoiser component dict from tiny random models."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log',
            prediction_type='sample',
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule='squaredcos_cap_v2',
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type='projection',
            # `* 2`: presumably image embedding and noise-level embedding are
            # concatenated -- confirm against StableUnCLIPPipeline.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear',
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type='v_prediction',
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            'prior_tokenizer': prior_tokenizer,
            'prior_text_encoder': prior_text_encoder,
            'prior': prior,
            'prior_scheduler': prior_scheduler,
            # image noising components
            'image_normalizer': image_normalizer,
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder,
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for a 2-step prior + 2-step denoise run."""
        if str(device).startswith('mps'):
            # MPS generators must be created via torch.manual_seed.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'prior_num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        # Only compare exact outputs on CPU, where results are deterministic.
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real `fusing/stable-unclip-2-1-l` checkpoint.

    Restored from the mangled original; also fixes `torch.floataa`, which is
    not a real dtype, to `torch.float16`.
    """

    def tearDown(self):
        # Free VRAM between tests so later tests do not OOM.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe('anime turle', generator=generator, output_type='np')
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            'anime turtle', prior_num_inference_steps=2, num_inference_steps=2, output_type='np',
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 93 | 0 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
# Pre-create the output layout so torch.save / json.dump below never hit a missing directory.
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    """Convert a diffuser temporal-UNet checkpoint for the given horizon to diffusers format.

    Loads the raw checkpoint from a hard-coded local path, renames its state
    dict keys to the diffusers `UNetaDModel` layout by positional zip, and
    writes the converted weights plus the config JSON under
    ``hub/hopper-medium-v2/unet/hor{hor}``.

    Restored from the mangled original (`_a` with collapsed locals); renamed
    to `unet`, the name the `__main__` guard below already calls.
    """
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch')
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 14,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 65536,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNetaDModel(**config)
    print(f'length of state dict: {len(state_dict.keys() )}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}')
    # Map old keys to new keys positionally -- assumes both state dicts list
    # parameters in the same order.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin')
    with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json', 'w') as f:
        json.dump(config, f)
def value_function():
    """Convert the diffuser value-function checkpoint to diffusers format.

    Unlike the UNet checkpoints, this file already contains a raw state dict,
    so it is used directly without calling ``.state_dict()``.  Output goes to
    ``hub/hopper-medium-v2/value_function``.

    Restored from the mangled original (`_a` with collapsed locals); renamed
    to `value_function`, the name the `__main__` guard below already calls.
    """
    config = {
        'in_channels': 14,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (32, 64, 128, 256),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 65536,
        'out_channels': 14,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch')
    state_dict = model  # checkpoint is already a state dict
    hf_value_function = UNetaDModel(**config)
    print(f'length of state dict: {len(state_dict.keys() )}')
    print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}')
    # Positional key mapping, as in unet() above.
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin')
    with open('hub/hopper-medium-v2/value_function/config.json', 'w') as f:
        json.dump(config, f)
if __name__ == "__main__":
    # Convert the horizon-32 UNet; the horizon-128 variant is left disabled.
    unet(32)
    # unet(128)
    value_function()
| 56 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module-level logger plus the list of config classes / model-type strings
# accepted by --model_type.  All three were collapsed to `__A` by the
# identifier mangling even though later code reads `logger`,
# `MODEL_CONFIG_CLASSES` and `MODEL_TYPES`.
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune or train from scratch.

    Field names restored from the mangled original (all were
    `__magic_name__`, with the undefined default `a`); the names are pinned
    by `main()`'s `model_args.<field>` accesses.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we train and evaluate on.

    Field names restored from the mangled original; they are pinned by the
    `data_args.<field>` accesses in `main()` and `get_dataset()`.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """Build the train or eval dataset described by ``args`` (a DataTrainingArguments).

    Dispatches between line-by-line datasets (optionally with Chinese
    whole-word-mask reference files) and block-wise `TextDataset`s, and
    concatenates multiple training files when a glob pattern is given.

    Fixes the mangled original, which (besides collapsed names) passed the
    wrong variable into the per-file dataset builder -- the comprehension
    must pass each globbed file ``f``.
    """

    def _dataset(file_path, ref_path=None):
        # One dataset per file; `ref_path` carries whole-word-mask references.
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """Train and/or evaluate a language model from command-line arguments.

    Parses `ModelArguments`, `DataTrainingArguments` and `TrainingArguments`,
    loads config/tokenizer/model, builds datasets and the appropriate data
    collator, runs `Trainer`, and returns the evaluation results dict.

    Restored from the mangled original whose locals were all collapsed to
    `lowerCAmelCase__` (and `fp16`/`max_len` digit-mangled).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.'
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.'
        )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name'
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).'
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info('  %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    """Entry point for TPU multiprocessing via `xla_spawn` (the index is unused)."""
    main()
if __name__ == "__main__":
    # Standard script entry point; `main` parses CLI args via HfArgumentParser.
    main()
| 93 | 0 |
# Adjacency list of the example DAG and the list of its vertices.  These were
# mangled to `A_` even though topological_sort() below reads the names
# `edges` and `vertices`.
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start, visited, sort):
    """Perform a DFS-based topological sort of the module-level `edges` graph.

    Appends each vertex to ``sort`` after all of its neighbours, starting the
    search at ``start``; any vertices unreachable from ``start`` are handled
    by a sweep over ``vertices`` at the end.  Returns the sorted list
    (post-order, i.e. dependencies first).

    Restored from the mangled original: the function was renamed away from
    `topological_sort` (which the `__main__` guard calls) and its locals
    collapsed.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    # The mangled original assigned the result to `A_` but printed the
    # undefined name `sort`; bind and print the same variable.
    sort = topological_sort('a', [], [])
    print(sort)
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of `TransformerTemporalModel.forward`.

    Attributes:
        sample: the transformed hidden states; same shape as the input
            hidden states (the forward pass adds a residual connection).
    """

    # Restored name: forward() below returns TransformerTemporalModelOutput,
    # but the mangled class was called `_lowerCAmelCase`.
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A Transformer block operating along the temporal (frame) axis of video-like data.

    Restored from the mangled original: parameter and local names were all
    collapsed (`__UpperCAmelCase` / `lowerCAmelCase__`), leaving the module
    unrunnable.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Apply temporal attention over frames.

        ``hidden_states`` is (batch_size * num_frames, channel, height, width);
        it is regrouped so attention runs across the `num_frames` axis for
        each spatial location, then reshaped back and added to the residual.
        Returns a `TransformerTemporalModelOutput` (or a 1-tuple when
        ``return_dict`` is False).
        """
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        # (batch*height*width, num_frames, channel): frames become the sequence axis.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
| 93 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    """Builds tiny `EsmConfig` fixtures and inputs for EsmForProteinFolding tests.

    Restored from the mangled original: the class name (which the test class
    below instantiates as `EsmFoldModelTester`) and all locals were collapsed.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input ids / masks / labels plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # Tiny folding config: two trunk blocks, fp16 disabled for CPU tests.
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for EsmForProteinFolding (ESMFold).

    Most inherited checks do not apply to ESMFold's unusual output format,
    so they are skipped explicitly with a reason.  Restored from the mangled
    original (class/attribute/method names pinned by the upstream ESMFold
    test and the skip-reason strings below).
    """

    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("""Does not support attention outputs""")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("""Esm does not support embedding resizing""")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("""Esm does not support embedding resizing""")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("""ESMFold does not support passing input embeds!""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""")
    def test_head_pruning(self):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("""ESMFold does not support head pruning.""")
    def test_headmasking(self):
        pass

    @unittest.skip("""ESMFold does not output hidden states in the normal way.""")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("""ESMfold does not output hidden states in the normal way.""")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("""ESMFold only has one output format.""")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("""ESMFold does not support input chunking.""")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""")
    def test_initialization(self):
        pass

    @unittest.skip("""ESMFold doesn't support torchscript compilation.""")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("""ESMFold doesn't support torchscript compilation.""")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("""ESMFold doesn't support torchscript compilation.""")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("""ESMFold doesn't support data parallel.""")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        pass
@require_torch
class _lowerCAmelCase(SCREAMING_SNAKE_CASE__):
    """Slow integration test: run ESMFold on a short sequence and spot-check
    one predicted atom coordinate against a reference value.

    Bug fixed: the obfuscated dump bound every intermediate to ``snake_case_``
    while reading the undefined names ``_lowercase`` and ``position_outputs``,
    so the test raised NameError before asserting anything.
    """

    @slow
    def test_inference_protein_folding(self) -> None:
        """Download esmfold_v1, fold an 11-token sequence, and compare the
        first predicted position against the recorded reference slice."""
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        # Token ids for a short protein sequence (BOS ... EOS).
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        # Reference values recorded from a known-good run.
        # NOTE(review): the index [0, 0, 0, 0] yields a scalar while the
        # reference holds three values — confirm the intended slice shape
        # against the upstream test before relying on a green result.
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 58 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Holds a DETA image-processor configuration and computes the expected
    output resolution for given inputs.

    Bugs fixed: the obfuscated dump declared every ``__init__`` /
    ``get_expected_values`` parameter with the same name (a SyntaxError),
    bound values to throwaway locals while reading the never-defined names
    ``w``, ``h`` and ``expected_values``, and named the class/methods so the
    call sites (``DetaImageProcessingTester(self)``,
    ``prepare_image_processor_dict()``, ``get_expected_values()``) could not
    resolve them. The names used by the callers are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # never mutated, so the shared default is safe
        image_std=[0.5, 0.5, 0.5],   # never mutated, so the shared default is safe
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default size mirrors the processor's own default resize target.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a DetaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should emit.

        Unbatched: apply the shortest-edge resize rule to the first image.
        Batched: per-image expected sizes, then the max over each dimension
        (because padding aligns the batch to the largest image).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # channels-first array/tensor: (C, H, W)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Test suite for DetaImageProcessor.

    Covers attribute presence, dict round-tripping, PIL/numpy/torch call
    behaviour, and (slow) COCO detection / panoptic annotation encoding.

    Bugs fixed: the obfuscated dump inherited from the undefined name ``a``
    instead of the imported ``ImageProcessingSavingTestMixin``, bound the
    processor class to ``__magic_name__`` while reading
    ``self.image_processing_class``, threw away the tester instance in
    ``setUp`` instead of storing ``self.image_processor_tester``, and gave
    every method the same name so only the last survived class creation.
    """

    # The saving-mixin tests are parameterized on this attribute.
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes every expected configuration attribute."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        """from_dict restores the default size and padding configuration."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        """Intentionally empty: batch-feature behaviour is covered by the mixin."""
        pass

    def test_call_pil(self):
        """Processing PIL images yields padded tensors of the expected shape."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        """Processing numpy arrays yields padded tensors of the expected shape."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        """Processing torch tensors yields padded tensors of the expected shape."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """End-to-end encoding of a real COCO detection annotation."""
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """End-to-end encoding of a real COCO panoptic annotation (with masks)."""
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetaImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 93 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of *vector* (vector · vector).

    Bug fixed: the SVC rbf kernel calls this helper as ``norm_squared``, but
    the obfuscated dump defined it only under ``lowerCAmelCase_`` — a
    guaranteed NameError. The alias below keeps the old name working.
    """
    return np.dot(vector, vector)


lowerCAmelCase_ = norm_squared  # backward-compatible alias for the obfuscated name
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : List[str] , *,
UpperCAmelCase_ : float = np.inf , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : float = 0.0 , ) ->None:
'''simple docstring'''
lowerCamelCase__: Dict =regularization
lowerCamelCase__: Any =gamma
if kernel == "linear":
lowerCamelCase__: Dict =self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma")
if not isinstance(self.gamma , (float, int)):
raise ValueError("gamma must be float or int")
if not self.gamma > 0:
raise ValueError("gamma must be > 0")
lowerCamelCase__: Tuple =self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowerCamelCase__: Optional[Any] =F"""Unknown kernel: {kernel}"""
raise ValueError(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float:
'''simple docstring'''
return np.dot(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : ndarray , UpperCAmelCase_ : ndarray) ->float:
'''simple docstring'''
return np.exp(-(self.gamma * norm_squared(vectora - vectora)))
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : list[ndarray] , UpperCAmelCase_ : ndarray) ->None:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =observations
lowerCamelCase__: Optional[int] =classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((lowerCamelCase__) , ): List[str] =np.shape(UpperCAmelCase_)
def to_minimize(UpperCAmelCase_ : ndarray) -> float:
lowerCamelCase__: int =0
((lowerCamelCase__) , ): Optional[Any] =np.shape(UpperCAmelCase_)
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j])
)
return 1 / 2 * s - sum(UpperCAmelCase_)
lowerCamelCase__: List[Any] =LinearConstraint(UpperCAmelCase_ , 0 , 0)
lowerCamelCase__: str =Bounds(0 , self.regularization)
lowerCamelCase__: Union[str, Any] =minimize(
UpperCAmelCase_ , np.ones(UpperCAmelCase_) , bounds=UpperCAmelCase_ , constraints=[ly_contraint]).x
lowerCamelCase__: str =l_star
# calculating mean offset of separation plane to points
lowerCamelCase__: Tuple =0
for i in range(UpperCAmelCase_):
for j in range(UpperCAmelCase_):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j])
lowerCamelCase__: int =s / n
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : ndarray) ->int:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , UpperCAmelCase_)
for n in range(len(self.classes)))
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59 |
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Recursively convert a non-negative integer to its binary digit string.

    Bug fixed: the obfuscated dump bound intermediates to throwaway names
    while reading the never-defined ``decimal``/``div``/``mod``, and defined
    the function under ``__A`` although the sibling wrapper calls
    ``binary_recursive`` — the alias below keeps the old name working.
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


__A = binary_recursive  # backward-compatible alias for the obfuscated name
def main(number: str) -> str:
    """Validate *number* and return its signed binary representation, e.g.
    ``"-5" -> "-0b101"``.

    Raises:
        ValueError: if the input is empty or not an integer.

    Bug fixed: the obfuscated dump bound the stripped string and sign to
    throwaway names while reading the never-defined ``number``/``negative``.
    The ``__A`` alias below preserves the dump's public name.
    """
    number = str(number).strip()
    if not number:
        raise ValueError('No input value was provided')
    negative = '-' if number.startswith('-') else ''
    number = number.lstrip('-')
    if not number.isnumeric():
        raise ValueError('Input value is not an integer')
    return f"{negative}0b{binary_recursive(int(number))}"


__A = main  # backward-compatible alias for the obfuscated name
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.